"""Answer inserter."""
from abc import abstractmethod
from typing import Any, Dict, List, Optional
from llama_index.core.llms.llm import LLM
from llama_index.core.prompts.base import BasePromptTemplate, PromptTemplate
from llama_index.core.prompts.mixin import (
PromptDictType,
PromptMixin,
PromptMixinType,
)
from llama_index.core.query_engine.flare.schema import QueryTask
from llama_index.core.service_context import ServiceContext
from llama_index.core.settings import Settings, llm_from_settings_or_context
class BaseLookaheadAnswerInserter(PromptMixin):
"""Lookahead answer inserter.
    These are responsible for inserting answers into a lookahead answer template.
E.g.
lookahead answer: Red is for [Search(What is the meaning of Ghana's
flag being red?)], green for forests, and gold for mineral wealth.
query: What is the meaning of Ghana's flag being red?
query answer: "the blood of those who died in the country's struggle
for independence"
final answer: Red is for the blood of those who died in the country's
struggle for independence, green for forests, and gold for mineral wealth.
"""
def _get_prompt_modules(self) -> PromptMixinType:
"""Get prompt sub-modules."""
return {}
@abstractmethod
def insert(
self,
response: str,
query_tasks: List[QueryTask],
answers: List[str],
prev_response: Optional[str] = None,
) -> str:
"""Insert answers into response."""
DEFAULT_ANSWER_INSERT_PROMPT_TMPL = """
An existing 'lookahead response' is given below. The lookahead response
contains `[Search(query)]` tags. Some queries have been executed and the
response retrieved. The queries and answers are also given below.
Also the previous response (the response before the lookahead response)
is given below.
Given the lookahead template, previous response, and also queries and answers,
please 'fill in' the lookahead template with the appropriate answers.
NOTE: Please make sure that the final response grammatically follows
the previous response + lookahead template. For example, if the previous
response is "New York City has a population of " and the lookahead
template is "[Search(What is the population of New York City?)]", then
the final response should be "8.4 million".
NOTE: the lookahead template may not be a complete sentence and may
contain trailing/leading commas, etc. Please preserve the original
formatting of the lookahead template if possible.
NOTE: the exception to the above rule is if the answer to a query
is equivalent to "I don't know" or "I don't have an answer". In this case,
modify the lookahead template to indicate that the answer is not known.
NOTE: the lookahead template may contain multiple `[Search(query)]` tags
and only a subset of these queries have been executed.
Do not replace the `[Search(query)]` tags that have not been executed.
Previous Response:
Lookahead Template:
Red is for [Search(What is the meaning of Ghana's \
flag being red?)], green for forests, and gold for mineral wealth.
Query-Answer Pairs:
Query: What is the meaning of Ghana's flag being red?
Answer: The red represents the blood of those who died in the country's struggle \
for independence
Filled in Answers:
Red is for the blood of those who died in the country's struggle for independence, \
green for forests, and gold for mineral wealth.
Previous Response:
One of the largest cities in the world
Lookahead Template:
, the city contains a population of [Search(What is the population \
of New York City?)]
Query-Answer Pairs:
Query: What is the population of New York City?
Answer: The population of New York City is 8.4 million
Synthesized Response:
, the city contains a population of 8.4 million
Previous Response:
the city contains a population of
Lookahead Template:
[Search(What is the population of New York City?)]
Query-Answer Pairs:
Query: What is the population of New York City?
Answer: The population of New York City is 8.4 million
Synthesized Response:
8.4 million
Previous Response:
{prev_response}
Lookahead Template:
{lookahead_response}
Query-Answer Pairs:
{query_answer_pairs}
Synthesized Response:
"""
DEFAULT_ANSWER_INSERT_PROMPT = PromptTemplate(DEFAULT_ANSWER_INSERT_PROMPT_TMPL)
class LLMLookaheadAnswerInserter(BaseLookaheadAnswerInserter):
"""LLM Lookahead answer inserter.
    Takes in a lookahead response, a list of query tasks, and the corresponding
    answers, and inserts the answers into the lookahead response.
"""
def __init__(
self,
llm: Optional[LLM] = None,
service_context: Optional[ServiceContext] = None,
answer_insert_prompt: Optional[BasePromptTemplate] = None,
) -> None:
"""Init params."""
self._llm = llm or llm_from_settings_or_context(Settings, service_context)
self._answer_insert_prompt = (
answer_insert_prompt or DEFAULT_ANSWER_INSERT_PROMPT
)
def _get_prompts(self) -> Dict[str, Any]:
"""Get prompts."""
return {
"answer_insert_prompt": self._answer_insert_prompt,
}
def _update_prompts(self, prompts: PromptDictType) -> None:
"""Update prompts."""
if "answer_insert_prompt" in prompts:
self._answer_insert_prompt = prompts["answer_insert_prompt"]
def insert(
self,
response: str,
query_tasks: List[QueryTask],
answers: List[str],
prev_response: Optional[str] = None,
) -> str:
"""Insert answers into response."""
prev_response = prev_response or ""
query_answer_pairs = ""
for query_task, answer in zip(query_tasks, answers):
query_answer_pairs += f"Query: {query_task.query_str}\nAnswer: {answer}\n"
return self._llm.predict(
self._answer_insert_prompt,
lookahead_response=response,
query_answer_pairs=query_answer_pairs,
prev_response=prev_response,
)
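# Illustrative usage sketch (added for clarity, not part of the original module).
# Assumes an LLM is configured via Settings or passed in, and that `task` is a
# QueryTask parsed from the lookahead response (carrying query_str plus the
# start/end offsets of its [Search(...)] tag):
#
#   inserter = LLMLookaheadAnswerInserter()
#   filled = inserter.insert(
#       response="Red is for [Search(Why is Ghana's flag red?)], green for forests.",
#       query_tasks=[task],
#       answers=["the blood of those who died in the struggle for independence"],
#       prev_response="Ghana's flag has three colors. ",
#   )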
class DirectLookaheadAnswerInserter(BaseLookaheadAnswerInserter):
"""Direct lookahead answer inserter.
Simple inserter module that directly inserts answers into
the [Search(query)] tags in the lookahead response.
"""
def _get_prompts(self) -> Dict[str, Any]:
"""Get prompts."""
return {}
def _update_prompts(self, prompts: PromptDictType) -> None:
"""Update prompts."""
def insert(
self,
response: str,
query_tasks: List[QueryTask],
answers: List[str],
prev_response: Optional[str] = None,
) -> str:
"""Insert answers into response."""
for query_task, answer in zip(query_tasks, answers):
response = (
response[: query_task.start_idx]
+ answer
+ response[query_task.end_idx + 1 :]
)
return response
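# Note (added for clarity): unlike the LLM-based inserter, this class makes no
# model call; it splices each answer directly into the character span recorded
# on its QueryTask (start_idx/end_idx), e.g. turning
# "[Search(What is the population of New York City?)]" into "8.4 million".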
| [
"llama_index.core.prompts.base.PromptTemplate",
"llama_index.core.settings.llm_from_settings_or_context"
] | [((4287, 4336), 'llama_index.core.prompts.base.PromptTemplate', 'PromptTemplate', (['DEFAULT_ANSWER_INSERT_PROMPT_TMPL'], {}), '(DEFAULT_ANSWER_INSERT_PROMPT_TMPL)\n', (4301, 4336), False, 'from llama_index.core.prompts.base import BasePromptTemplate, PromptTemplate\n'), ((4860, 4915), 'llama_index.core.settings.llm_from_settings_or_context', 'llm_from_settings_or_context', (['Settings', 'service_context'], {}), '(Settings, service_context)\n', (4888, 4915), False, 'from llama_index.core.settings import Settings, llm_from_settings_or_context\n')] |
"""Retrieval evaluators."""
from typing import Any, List, Optional, Sequence, Tuple
from llama_index.legacy.bridge.pydantic import Field
from llama_index.legacy.core.base_retriever import BaseRetriever
from llama_index.legacy.evaluation.retrieval.base import (
BaseRetrievalEvaluator,
RetrievalEvalMode,
)
from llama_index.legacy.evaluation.retrieval.metrics_base import (
BaseRetrievalMetric,
)
from llama_index.legacy.indices.base_retriever import BaseRetriever
from llama_index.legacy.postprocessor.types import BaseNodePostprocessor
from llama_index.legacy.schema import ImageNode, TextNode
class RetrieverEvaluator(BaseRetrievalEvaluator):
"""Retriever evaluator.
This module will evaluate a retriever using a set of metrics.
Args:
metrics (List[BaseRetrievalMetric]): Sequence of metrics to evaluate
retriever: Retriever to evaluate.
node_postprocessors (Optional[List[BaseNodePostprocessor]]): Post-processor to apply after retrieval.
"""
retriever: BaseRetriever = Field(..., description="Retriever to evaluate")
node_postprocessors: Optional[List[BaseNodePostprocessor]] = Field(
default=None, description="Optional post-processor"
)
def __init__(
self,
metrics: Sequence[BaseRetrievalMetric],
retriever: BaseRetriever,
node_postprocessors: Optional[List[BaseNodePostprocessor]] = None,
**kwargs: Any,
) -> None:
"""Init params."""
super().__init__(
metrics=metrics,
retriever=retriever,
node_postprocessors=node_postprocessors,
**kwargs,
)
async def _aget_retrieved_ids_and_texts(
self, query: str, mode: RetrievalEvalMode = RetrievalEvalMode.TEXT
) -> Tuple[List[str], List[str]]:
"""Get retrieved ids and texts, potentially applying a post-processor."""
retrieved_nodes = await self.retriever.aretrieve(query)
if self.node_postprocessors:
for node_postprocessor in self.node_postprocessors:
retrieved_nodes = node_postprocessor.postprocess_nodes(
retrieved_nodes, query_str=query
)
return (
[node.node.node_id for node in retrieved_nodes],
[node.node.text for node in retrieved_nodes],
)
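# Illustrative usage sketch (added for clarity, not part of the original module),
# assuming the metric registry resolves names such as "mrr" and "hit_rate" and
# that `index` is an existing vector index:
#
#   evaluator = RetrieverEvaluator.from_metric_names(
#       ["mrr", "hit_rate"], retriever=index.as_retriever(similarity_top_k=2)
#   )
#   result = evaluator.evaluate(
#       query="What color is Ghana's flag?", expected_ids=["node-1", "node-7"]
#   )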
class MultiModalRetrieverEvaluator(BaseRetrievalEvaluator):
"""Retriever evaluator.
This module will evaluate a retriever using a set of metrics.
Args:
metrics (List[BaseRetrievalMetric]): Sequence of metrics to evaluate
retriever: Retriever to evaluate.
node_postprocessors (Optional[List[BaseNodePostprocessor]]): Post-processor to apply after retrieval.
"""
retriever: BaseRetriever = Field(..., description="Retriever to evaluate")
node_postprocessors: Optional[List[BaseNodePostprocessor]] = Field(
default=None, description="Optional post-processor"
)
def __init__(
self,
metrics: Sequence[BaseRetrievalMetric],
retriever: BaseRetriever,
node_postprocessors: Optional[List[BaseNodePostprocessor]] = None,
**kwargs: Any,
) -> None:
"""Init params."""
super().__init__(
metrics=metrics,
retriever=retriever,
node_postprocessors=node_postprocessors,
**kwargs,
)
async def _aget_retrieved_ids_texts(
self, query: str, mode: RetrievalEvalMode = RetrievalEvalMode.TEXT
) -> Tuple[List[str], List[str]]:
"""Get retrieved ids."""
retrieved_nodes = await self.retriever.aretrieve(query)
image_nodes: List[ImageNode] = []
text_nodes: List[TextNode] = []
if self.node_postprocessors:
for node_postprocessor in self.node_postprocessors:
retrieved_nodes = node_postprocessor.postprocess_nodes(
retrieved_nodes, query_str=query
)
for scored_node in retrieved_nodes:
node = scored_node.node
if isinstance(node, ImageNode):
image_nodes.append(node)
if node.text:
text_nodes.append(node)
if mode == "text":
return (
[node.node_id for node in text_nodes],
[node.text for node in text_nodes],
)
elif mode == "image":
return (
[node.node_id for node in image_nodes],
[node.text for node in image_nodes],
)
else:
raise ValueError("Unsupported mode.")
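# Note (added for clarity): the multi-modal variant splits retrieved nodes into
# ImageNodes and TextNodes after post-processing and returns the ids/texts for
# the requested RetrievalEvalMode ("text" or "image"); any other mode raises.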
| [
"llama_index.legacy.bridge.pydantic.Field"
] | [((1038, 1085), 'llama_index.legacy.bridge.pydantic.Field', 'Field', (['...'], {'description': '"""Retriever to evaluate"""'}), "(..., description='Retriever to evaluate')\n", (1043, 1085), False, 'from llama_index.legacy.bridge.pydantic import Field\n'), ((1151, 1209), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'default': 'None', 'description': '"""Optional post-processor"""'}), "(default=None, description='Optional post-processor')\n", (1156, 1209), False, 'from llama_index.legacy.bridge.pydantic import Field\n'), ((2787, 2834), 'llama_index.legacy.bridge.pydantic.Field', 'Field', (['...'], {'description': '"""Retriever to evaluate"""'}), "(..., description='Retriever to evaluate')\n", (2792, 2834), False, 'from llama_index.legacy.bridge.pydantic import Field\n'), ((2900, 2958), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'default': 'None', 'description': '"""Optional post-processor"""'}), "(default=None, description='Optional post-processor')\n", (2905, 2958), False, 'from llama_index.legacy.bridge.pydantic import Field\n')] |
"""Retrieval evaluators."""
from typing import Any, List, Optional, Sequence, Tuple
from llama_index.legacy.bridge.pydantic import Field
from llama_index.legacy.core.base_retriever import BaseRetriever
from llama_index.legacy.evaluation.retrieval.base import (
BaseRetrievalEvaluator,
RetrievalEvalMode,
)
from llama_index.legacy.evaluation.retrieval.metrics_base import (
BaseRetrievalMetric,
)
from llama_index.legacy.indices.base_retriever import BaseRetriever
from llama_index.legacy.postprocessor.types import BaseNodePostprocessor
from llama_index.legacy.schema import ImageNode, TextNode
class RetrieverEvaluator(BaseRetrievalEvaluator):
"""Retriever evaluator.
This module will evaluate a retriever using a set of metrics.
Args:
metrics (List[BaseRetrievalMetric]): Sequence of metrics to evaluate
retriever: Retriever to evaluate.
node_postprocessors (Optional[List[BaseNodePostprocessor]]): Post-processor to apply after retrieval.
"""
retriever: BaseRetriever = Field(..., description="Retriever to evaluate")
node_postprocessors: Optional[List[BaseNodePostprocessor]] = Field(
default=None, description="Optional post-processor"
)
def __init__(
self,
metrics: Sequence[BaseRetrievalMetric],
retriever: BaseRetriever,
node_postprocessors: Optional[List[BaseNodePostprocessor]] = None,
**kwargs: Any,
) -> None:
"""Init params."""
super().__init__(
metrics=metrics,
retriever=retriever,
node_postprocessors=node_postprocessors,
**kwargs,
)
async def _aget_retrieved_ids_and_texts(
self, query: str, mode: RetrievalEvalMode = RetrievalEvalMode.TEXT
) -> Tuple[List[str], List[str]]:
"""Get retrieved ids and texts, potentially applying a post-processor."""
retrieved_nodes = await self.retriever.aretrieve(query)
if self.node_postprocessors:
for node_postprocessor in self.node_postprocessors:
retrieved_nodes = node_postprocessor.postprocess_nodes(
retrieved_nodes, query_str=query
)
return (
[node.node.node_id for node in retrieved_nodes],
[node.node.text for node in retrieved_nodes],
)
class MultiModalRetrieverEvaluator(BaseRetrievalEvaluator):
"""Retriever evaluator.
This module will evaluate a retriever using a set of metrics.
Args:
metrics (List[BaseRetrievalMetric]): Sequence of metrics to evaluate
retriever: Retriever to evaluate.
node_postprocessors (Optional[List[BaseNodePostprocessor]]): Post-processor to apply after retrieval.
"""
retriever: BaseRetriever = Field(..., description="Retriever to evaluate")
node_postprocessors: Optional[List[BaseNodePostprocessor]] = Field(
default=None, description="Optional post-processor"
)
def __init__(
self,
metrics: Sequence[BaseRetrievalMetric],
retriever: BaseRetriever,
node_postprocessors: Optional[List[BaseNodePostprocessor]] = None,
**kwargs: Any,
) -> None:
"""Init params."""
super().__init__(
metrics=metrics,
retriever=retriever,
node_postprocessors=node_postprocessors,
**kwargs,
)
async def _aget_retrieved_ids_texts(
self, query: str, mode: RetrievalEvalMode = RetrievalEvalMode.TEXT
) -> Tuple[List[str], List[str]]:
"""Get retrieved ids."""
retrieved_nodes = await self.retriever.aretrieve(query)
image_nodes: List[ImageNode] = []
text_nodes: List[TextNode] = []
if self.node_postprocessors:
for node_postprocessor in self.node_postprocessors:
retrieved_nodes = node_postprocessor.postprocess_nodes(
retrieved_nodes, query_str=query
)
for scored_node in retrieved_nodes:
node = scored_node.node
if isinstance(node, ImageNode):
image_nodes.append(node)
if node.text:
text_nodes.append(node)
if mode == "text":
return (
[node.node_id for node in text_nodes],
[node.text for node in text_nodes],
)
elif mode == "image":
return (
[node.node_id for node in image_nodes],
[node.text for node in image_nodes],
)
else:
raise ValueError("Unsupported mode.")
| [
"llama_index.legacy.bridge.pydantic.Field"
] | [((1038, 1085), 'llama_index.legacy.bridge.pydantic.Field', 'Field', (['...'], {'description': '"""Retriever to evaluate"""'}), "(..., description='Retriever to evaluate')\n", (1043, 1085), False, 'from llama_index.legacy.bridge.pydantic import Field\n'), ((1151, 1209), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'default': 'None', 'description': '"""Optional post-processor"""'}), "(default=None, description='Optional post-processor')\n", (1156, 1209), False, 'from llama_index.legacy.bridge.pydantic import Field\n'), ((2787, 2834), 'llama_index.legacy.bridge.pydantic.Field', 'Field', (['...'], {'description': '"""Retriever to evaluate"""'}), "(..., description='Retriever to evaluate')\n", (2792, 2834), False, 'from llama_index.legacy.bridge.pydantic import Field\n'), ((2900, 2958), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'default': 'None', 'description': '"""Optional post-processor"""'}), "(default=None, description='Optional post-processor')\n", (2905, 2958), False, 'from llama_index.legacy.bridge.pydantic import Field\n')] |
from typing import Any, List, Optional
from llama_index.legacy.bridge.pydantic import Field, PrivateAttr
from llama_index.legacy.callbacks import CallbackManager
from llama_index.legacy.core.embeddings.base import (
DEFAULT_EMBED_BATCH_SIZE,
BaseEmbedding,
)
from llama_index.legacy.embeddings.huggingface_utils import (
format_query,
format_text,
get_pooling_mode,
)
from llama_index.legacy.embeddings.pooling import Pooling
from llama_index.legacy.utils import infer_torch_device
class OptimumEmbedding(BaseEmbedding):
folder_name: str = Field(description="Folder name to load from.")
max_length: int = Field(description="Maximum length of input.")
pooling: str = Field(description="Pooling strategy. One of ['cls', 'mean'].")
    normalize: bool = Field(default=True, description="Normalize embeddings or not.")
query_instruction: Optional[str] = Field(
description="Instruction to prepend to query text."
)
text_instruction: Optional[str] = Field(
description="Instruction to prepend to text."
)
cache_folder: Optional[str] = Field(
description="Cache folder for huggingface files."
)
_model: Any = PrivateAttr()
_tokenizer: Any = PrivateAttr()
_device: Any = PrivateAttr()
def __init__(
self,
folder_name: str,
pooling: Optional[str] = None,
max_length: Optional[int] = None,
normalize: bool = True,
query_instruction: Optional[str] = None,
text_instruction: Optional[str] = None,
model: Optional[Any] = None,
tokenizer: Optional[Any] = None,
embed_batch_size: int = DEFAULT_EMBED_BATCH_SIZE,
callback_manager: Optional[CallbackManager] = None,
device: Optional[str] = None,
):
try:
from optimum.onnxruntime import ORTModelForFeatureExtraction
from transformers import AutoTokenizer
except ImportError:
raise ImportError(
"OptimumEmbedding requires transformers to be installed.\n"
"Please install transformers with "
"`pip install transformers optimum[exporters]`."
)
self._model = model or ORTModelForFeatureExtraction.from_pretrained(folder_name)
self._tokenizer = tokenizer or AutoTokenizer.from_pretrained(folder_name)
self._device = device or infer_torch_device()
if max_length is None:
try:
max_length = int(self._model.config.max_position_embeddings)
except Exception:
raise ValueError(
"Unable to find max_length from model config. "
"Please provide max_length."
)
if not pooling:
pooling = get_pooling_mode(model)
try:
pooling = Pooling(pooling)
except ValueError as exc:
raise NotImplementedError(
f"Pooling {pooling} unsupported, please pick one in"
f" {[p.value for p in Pooling]}."
) from exc
super().__init__(
embed_batch_size=embed_batch_size,
callback_manager=callback_manager,
folder_name=folder_name,
max_length=max_length,
pooling=pooling,
normalize=normalize,
query_instruction=query_instruction,
text_instruction=text_instruction,
)
@classmethod
def class_name(cls) -> str:
return "OptimumEmbedding"
@classmethod
def create_and_save_optimum_model(
cls,
model_name_or_path: str,
output_path: str,
export_kwargs: Optional[dict] = None,
) -> None:
try:
from optimum.onnxruntime import ORTModelForFeatureExtraction
from transformers import AutoTokenizer
except ImportError:
raise ImportError(
"OptimumEmbedding requires transformers to be installed.\n"
"Please install transformers with "
"`pip install transformers optimum[exporters]`."
)
export_kwargs = export_kwargs or {}
model = ORTModelForFeatureExtraction.from_pretrained(
model_name_or_path, export=True, **export_kwargs
)
tokenizer = AutoTokenizer.from_pretrained(model_name_or_path)
model.save_pretrained(output_path)
tokenizer.save_pretrained(output_path)
print(
f"Saved optimum model to {output_path}. Use it with "
f"`embed_model = OptimumEmbedding(folder_name='{output_path}')`."
)
def _mean_pooling(self, model_output: Any, attention_mask: Any) -> Any:
"""Mean Pooling - Take attention mask into account for correct averaging."""
import torch
# First element of model_output contains all token embeddings
token_embeddings = model_output[0]
input_mask_expanded = (
attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float()
)
return torch.sum(token_embeddings * input_mask_expanded, 1) / torch.clamp(
input_mask_expanded.sum(1), min=1e-9
)
def _cls_pooling(self, model_output: list) -> Any:
"""Use the CLS token as the pooling token."""
return model_output[0][:, 0]
def _embed(self, sentences: List[str]) -> List[List[float]]:
"""Embed sentences."""
encoded_input = self._tokenizer(
sentences,
padding=True,
max_length=self.max_length,
truncation=True,
return_tensors="pt",
)
# pop token_type_ids
encoded_input.pop("token_type_ids", None)
model_output = self._model(**encoded_input)
if self.pooling == "cls":
embeddings = self._cls_pooling(model_output)
else:
embeddings = self._mean_pooling(
model_output, encoded_input["attention_mask"].to(self._device)
)
if self.normalize:
import torch
embeddings = torch.nn.functional.normalize(embeddings, p=2, dim=1)
return embeddings.tolist()
def _get_query_embedding(self, query: str) -> List[float]:
"""Get query embedding."""
query = format_query(query, self.model_name, self.query_instruction)
return self._embed([query])[0]
async def _aget_query_embedding(self, query: str) -> List[float]:
"""Get query embedding async."""
return self._get_query_embedding(query)
async def _aget_text_embedding(self, text: str) -> List[float]:
"""Get text embedding async."""
return self._get_text_embedding(text)
def _get_text_embedding(self, text: str) -> List[float]:
"""Get text embedding."""
text = format_text(text, self.model_name, self.text_instruction)
return self._embed([text])[0]
def _get_text_embeddings(self, texts: List[str]) -> List[List[float]]:
"""Get text embeddings."""
texts = [
format_text(text, self.model_name, self.text_instruction) for text in texts
]
return self._embed(texts)
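# Illustrative usage sketch (added for clarity, not part of the original module).
# The two-step flow mirrors the hint printed by create_and_save_optimum_model;
# the model name and output folder below are placeholders:
#
#   OptimumEmbedding.create_and_save_optimum_model(
#       "BAAI/bge-small-en-v1.5", "./bge_onnx"
#   )
#   embed_model = OptimumEmbedding(folder_name="./bge_onnx")
#   vector = embed_model.get_text_embedding("Hello world!")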
| [
"llama_index.legacy.embeddings.huggingface_utils.format_query",
"llama_index.legacy.embeddings.huggingface_utils.get_pooling_mode",
"llama_index.legacy.embeddings.pooling.Pooling",
"llama_index.legacy.bridge.pydantic.PrivateAttr",
"llama_index.legacy.bridge.pydantic.Field",
"llama_index.legacy.embeddings.huggingface_utils.format_text",
"llama_index.legacy.utils.infer_torch_device"
] | [((567, 613), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'description': '"""Folder name to load from."""'}), "(description='Folder name to load from.')\n", (572, 613), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((636, 681), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'description': '"""Maximum length of input."""'}), "(description='Maximum length of input.')\n", (641, 681), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((701, 763), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'description': '"""Pooling strategy. One of [\'cls\', \'mean\']."""'}), '(description="Pooling strategy. One of [\'cls\', \'mean\'].")\n', (706, 763), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((785, 848), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'default': '(True)', 'description': '"""Normalize embeddings or not."""'}), "(default=True, description='Normalize embeddings or not.')\n", (790, 848), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((888, 946), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'description': '"""Instruction to prepend to query text."""'}), "(description='Instruction to prepend to query text.')\n", (893, 946), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((999, 1051), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'description': '"""Instruction to prepend to text."""'}), "(description='Instruction to prepend to text.')\n", (1004, 1051), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((1100, 1156), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'description': '"""Cache folder for huggingface files."""'}), "(description='Cache folder for huggingface files.')\n", (1105, 1156), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((1190, 1203), 'llama_index.legacy.bridge.pydantic.PrivateAttr', 'PrivateAttr', ([], {}), '()\n', (1201, 1203), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((1226, 1239), 'llama_index.legacy.bridge.pydantic.PrivateAttr', 'PrivateAttr', ([], {}), '()\n', (1237, 1239), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((1259, 1272), 'llama_index.legacy.bridge.pydantic.PrivateAttr', 'PrivateAttr', ([], {}), '()\n', (1270, 1272), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((4174, 4273), 'optimum.onnxruntime.ORTModelForFeatureExtraction.from_pretrained', 'ORTModelForFeatureExtraction.from_pretrained', (['model_name_or_path'], {'export': '(True)'}), '(model_name_or_path, export=\n True, **export_kwargs)\n', (4218, 4273), False, 'from optimum.onnxruntime import ORTModelForFeatureExtraction\n'), ((4311, 4360), 'transformers.AutoTokenizer.from_pretrained', 'AutoTokenizer.from_pretrained', (['model_name_or_path'], {}), '(model_name_or_path)\n', (4340, 4360), False, 'from transformers import AutoTokenizer\n'), ((6290, 6350), 'llama_index.legacy.embeddings.huggingface_utils.format_query', 'format_query', (['query', 'self.model_name', 'self.query_instruction'], {}), '(query, self.model_name, self.query_instruction)\n', (6302, 6350), False, 'from llama_index.legacy.embeddings.huggingface_utils import format_query, format_text, get_pooling_mode\n'), ((6816, 6873), 'llama_index.legacy.embeddings.huggingface_utils.format_text', 'format_text', (['text', 
'self.model_name', 'self.text_instruction'], {}), '(text, self.model_name, self.text_instruction)\n', (6827, 6873), False, 'from llama_index.legacy.embeddings.huggingface_utils import format_query, format_text, get_pooling_mode\n'), ((2218, 2275), 'optimum.onnxruntime.ORTModelForFeatureExtraction.from_pretrained', 'ORTModelForFeatureExtraction.from_pretrained', (['folder_name'], {}), '(folder_name)\n', (2262, 2275), False, 'from optimum.onnxruntime import ORTModelForFeatureExtraction\n'), ((2315, 2357), 'transformers.AutoTokenizer.from_pretrained', 'AutoTokenizer.from_pretrained', (['folder_name'], {}), '(folder_name)\n', (2344, 2357), False, 'from transformers import AutoTokenizer\n'), ((2391, 2411), 'llama_index.legacy.utils.infer_torch_device', 'infer_torch_device', ([], {}), '()\n', (2409, 2411), False, 'from llama_index.legacy.utils import infer_torch_device\n'), ((2784, 2807), 'llama_index.legacy.embeddings.huggingface_utils.get_pooling_mode', 'get_pooling_mode', (['model'], {}), '(model)\n', (2800, 2807), False, 'from llama_index.legacy.embeddings.huggingface_utils import format_query, format_text, get_pooling_mode\n'), ((2843, 2859), 'llama_index.legacy.embeddings.pooling.Pooling', 'Pooling', (['pooling'], {}), '(pooling)\n', (2850, 2859), False, 'from llama_index.legacy.embeddings.pooling import Pooling\n'), ((5056, 5108), 'torch.sum', 'torch.sum', (['(token_embeddings * input_mask_expanded)', '(1)'], {}), '(token_embeddings * input_mask_expanded, 1)\n', (5065, 5108), False, 'import torch\n'), ((6085, 6138), 'torch.nn.functional.normalize', 'torch.nn.functional.normalize', (['embeddings'], {'p': '(2)', 'dim': '(1)'}), '(embeddings, p=2, dim=1)\n', (6114, 6138), False, 'import torch\n'), ((7053, 7110), 'llama_index.legacy.embeddings.huggingface_utils.format_text', 'format_text', (['text', 'self.model_name', 'self.text_instruction'], {}), '(text, self.model_name, self.text_instruction)\n', (7064, 7110), False, 'from llama_index.legacy.embeddings.huggingface_utils import format_query, format_text, get_pooling_mode\n')] |
"""Autoretriever prompts."""
from llama_index.legacy.prompts.base import PromptTemplate
from llama_index.legacy.prompts.prompt_type import PromptType
from llama_index.legacy.vector_stores.types import (
FilterOperator,
MetadataFilter,
MetadataInfo,
VectorStoreInfo,
VectorStoreQuerySpec,
)
# NOTE: these prompts are inspired from langchain's self-query prompt,
# and adapted to our use case.
# https://github.com/hwchase17/langchain/tree/main/langchain/chains/query_constructor/prompt.py
PREFIX = """\
Your goal is to structure the user's query to match the request schema provided below.
<< Structured Request Schema >>
When responding use a markdown code snippet with a JSON object formatted in the \
following schema:
{schema_str}
The query string should contain only text that is expected to match the contents of \
documents. Any conditions in the filter should not be mentioned in the query as well.
Make sure that filters only refer to attributes that exist in the data source.
Make sure that filters take into account the descriptions of attributes.
Make sure that filters are only used as needed. If there are no filters that should be \
applied return [] for the filter value.\
If the user's query explicitly mentions number of documents to retrieve, set top_k to \
that number, otherwise do not set top_k.
"""
example_info_1 = VectorStoreInfo(
content_info="Lyrics of a song",
metadata_info=[
MetadataInfo(name="artist", type="str", description="Name of the song artist"),
MetadataInfo(
name="genre",
type="str",
description='The song genre, one of "pop", "rock" or "rap"',
),
],
)
example_query_1 = "What are songs by Taylor Swift or Katy Perry about teenage romance in the dance pop genre"
example_output_1 = VectorStoreQuerySpec(
query="what songs are about teenager love",
filters=[
MetadataFilter(key="artist", value="Taylor Swift"),
MetadataFilter(key="artist", value="Katy Perry"),
MetadataFilter(key="genre", value="pop"),
],
)
example_info_2 = VectorStoreInfo(
content_info="Classic literature",
metadata_info=[
MetadataInfo(name="author", type="str", description="Author name"),
MetadataInfo(
name="book_title",
type="str",
description="Book title",
),
MetadataInfo(
name="year",
type="int",
description="Year Published",
),
MetadataInfo(
name="pages",
type="int",
description="Number of pages",
),
MetadataInfo(
name="summary",
type="str",
description="A short summary of the book",
),
],
)
example_query_2 = "What are some books by Jane Austen published after 1813 that explore the theme of marriage for social standing?"
example_output_2 = VectorStoreQuerySpec(
query="What books related to theme of marriage for social standing?",
filters=[
MetadataFilter(key="year", value="1813", operator=FilterOperator.GT),
MetadataFilter(key="author", value="Jane Austen"),
],
)
EXAMPLES = f"""\
<< Example 1. >>
Data Source:
```json
{example_info_1.json(indent=4)}
```
User Query:
{example_query_1}
Structured Request:
```json
{example_output_1.json()}
```
<< Example 2. >>
Data Source:
```json
{example_info_2.json(indent=4)}
```
User Query:
{example_query_2}
Structured Request:
```json
{example_output_2.json()}
```
""".replace(
"{", "{{"
).replace(
"}", "}}"
)
SUFFIX = """
<< Example 3. >>
Data Source:
```json
{info_str}
```
User Query:
{query_str}
Structured Request:
"""
DEFAULT_VECTARA_QUERY_PROMPT_TMPL = PREFIX + EXAMPLES + SUFFIX
# deprecated, kept for backwards compatibility
"""Vector store query prompt."""
VectorStoreQueryPrompt = PromptTemplate
DEFAULT_VECTARA_QUERY_PROMPT = PromptTemplate(
template=DEFAULT_VECTARA_QUERY_PROMPT_TMPL,
prompt_type=PromptType.VECTOR_STORE_QUERY,
)
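# Illustrative usage sketch (added for clarity, not part of the original module):
# the final prompt is produced by formatting the template with a JSON schema for
# VectorStoreQuerySpec, the serialized data-source info, and the user query.
# `my_vector_store_info` below is a placeholder VectorStoreInfo:
#
#   prompt_str = DEFAULT_VECTARA_QUERY_PROMPT.format(
#       schema_str=VectorStoreQuerySpec.schema_json(indent=4),
#       info_str=my_vector_store_info.json(indent=4),
#       query_str="songs about heartbreak released after 2010",
#   )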
| [
"llama_index.legacy.vector_stores.types.MetadataInfo",
"llama_index.legacy.vector_stores.types.MetadataFilter",
"llama_index.legacy.prompts.base.PromptTemplate"
] | [((3927, 4033), 'llama_index.legacy.prompts.base.PromptTemplate', 'PromptTemplate', ([], {'template': 'DEFAULT_VECTARA_QUERY_PROMPT_TMPL', 'prompt_type': 'PromptType.VECTOR_STORE_QUERY'}), '(template=DEFAULT_VECTARA_QUERY_PROMPT_TMPL, prompt_type=\n PromptType.VECTOR_STORE_QUERY)\n', (3941, 4033), False, 'from llama_index.legacy.prompts.base import PromptTemplate\n'), ((1451, 1529), 'llama_index.legacy.vector_stores.types.MetadataInfo', 'MetadataInfo', ([], {'name': '"""artist"""', 'type': '"""str"""', 'description': '"""Name of the song artist"""'}), "(name='artist', type='str', description='Name of the song artist')\n", (1463, 1529), False, 'from llama_index.legacy.vector_stores.types import FilterOperator, MetadataFilter, MetadataInfo, VectorStoreInfo, VectorStoreQuerySpec\n'), ((1539, 1643), 'llama_index.legacy.vector_stores.types.MetadataInfo', 'MetadataInfo', ([], {'name': '"""genre"""', 'type': '"""str"""', 'description': '"""The song genre, one of "pop", "rock" or "rap\\""""'}), '(name=\'genre\', type=\'str\', description=\n \'The song genre, one of "pop", "rock" or "rap"\')\n', (1551, 1643), False, 'from llama_index.legacy.vector_stores.types import FilterOperator, MetadataFilter, MetadataInfo, VectorStoreInfo, VectorStoreQuerySpec\n'), ((1919, 1969), 'llama_index.legacy.vector_stores.types.MetadataFilter', 'MetadataFilter', ([], {'key': '"""artist"""', 'value': '"""Taylor Swift"""'}), "(key='artist', value='Taylor Swift')\n", (1933, 1969), False, 'from llama_index.legacy.vector_stores.types import FilterOperator, MetadataFilter, MetadataInfo, VectorStoreInfo, VectorStoreQuerySpec\n'), ((1979, 2027), 'llama_index.legacy.vector_stores.types.MetadataFilter', 'MetadataFilter', ([], {'key': '"""artist"""', 'value': '"""Katy Perry"""'}), "(key='artist', value='Katy Perry')\n", (1993, 2027), False, 'from llama_index.legacy.vector_stores.types import FilterOperator, MetadataFilter, MetadataInfo, VectorStoreInfo, VectorStoreQuerySpec\n'), ((2037, 2077), 'llama_index.legacy.vector_stores.types.MetadataFilter', 'MetadataFilter', ([], {'key': '"""genre"""', 'value': '"""pop"""'}), "(key='genre', value='pop')\n", (2051, 2077), False, 'from llama_index.legacy.vector_stores.types import FilterOperator, MetadataFilter, MetadataInfo, VectorStoreInfo, VectorStoreQuerySpec\n'), ((2190, 2256), 'llama_index.legacy.vector_stores.types.MetadataInfo', 'MetadataInfo', ([], {'name': '"""author"""', 'type': '"""str"""', 'description': '"""Author name"""'}), "(name='author', type='str', description='Author name')\n", (2202, 2256), False, 'from llama_index.legacy.vector_stores.types import FilterOperator, MetadataFilter, MetadataInfo, VectorStoreInfo, VectorStoreQuerySpec\n'), ((2266, 2335), 'llama_index.legacy.vector_stores.types.MetadataInfo', 'MetadataInfo', ([], {'name': '"""book_title"""', 'type': '"""str"""', 'description': '"""Book title"""'}), "(name='book_title', type='str', description='Book title')\n", (2278, 2335), False, 'from llama_index.legacy.vector_stores.types import FilterOperator, MetadataFilter, MetadataInfo, VectorStoreInfo, VectorStoreQuerySpec\n'), ((2392, 2459), 'llama_index.legacy.vector_stores.types.MetadataInfo', 'MetadataInfo', ([], {'name': '"""year"""', 'type': '"""int"""', 'description': '"""Year Published"""'}), "(name='year', type='int', description='Year Published')\n", (2404, 2459), False, 'from llama_index.legacy.vector_stores.types import FilterOperator, MetadataFilter, MetadataInfo, VectorStoreInfo, VectorStoreQuerySpec\n'), ((2516, 2585), 
'llama_index.legacy.vector_stores.types.MetadataInfo', 'MetadataInfo', ([], {'name': '"""pages"""', 'type': '"""int"""', 'description': '"""Number of pages"""'}), "(name='pages', type='int', description='Number of pages')\n", (2528, 2585), False, 'from llama_index.legacy.vector_stores.types import FilterOperator, MetadataFilter, MetadataInfo, VectorStoreInfo, VectorStoreQuerySpec\n'), ((2642, 2730), 'llama_index.legacy.vector_stores.types.MetadataInfo', 'MetadataInfo', ([], {'name': '"""summary"""', 'type': '"""str"""', 'description': '"""A short summary of the book"""'}), "(name='summary', type='str', description=\n 'A short summary of the book')\n", (2654, 2730), False, 'from llama_index.legacy.vector_stores.types import FilterOperator, MetadataFilter, MetadataInfo, VectorStoreInfo, VectorStoreQuerySpec\n'), ((3054, 3122), 'llama_index.legacy.vector_stores.types.MetadataFilter', 'MetadataFilter', ([], {'key': '"""year"""', 'value': '"""1813"""', 'operator': 'FilterOperator.GT'}), "(key='year', value='1813', operator=FilterOperator.GT)\n", (3068, 3122), False, 'from llama_index.legacy.vector_stores.types import FilterOperator, MetadataFilter, MetadataInfo, VectorStoreInfo, VectorStoreQuerySpec\n'), ((3132, 3181), 'llama_index.legacy.vector_stores.types.MetadataFilter', 'MetadataFilter', ([], {'key': '"""author"""', 'value': '"""Jane Austen"""'}), "(key='author', value='Jane Austen')\n", (3146, 3181), False, 'from llama_index.legacy.vector_stores.types import FilterOperator, MetadataFilter, MetadataInfo, VectorStoreInfo, VectorStoreQuerySpec\n')] |
"""Autoretriever prompts."""
from llama_index.legacy.prompts.base import PromptTemplate
from llama_index.legacy.prompts.prompt_type import PromptType
from llama_index.legacy.vector_stores.types import (
FilterOperator,
MetadataFilter,
MetadataInfo,
VectorStoreInfo,
VectorStoreQuerySpec,
)
# NOTE: these prompts are inspired from langchain's self-query prompt,
# and adapted to our use case.
# https://github.com/hwchase17/langchain/tree/main/langchain/chains/query_constructor/prompt.py
PREFIX = """\
Your goal is to structure the user's query to match the request schema provided below.
<< Structured Request Schema >>
When responding use a markdown code snippet with a JSON object formatted in the \
following schema:
{schema_str}
The query string should contain only text that is expected to match the contents of \
documents. Any conditions in the filter should not be mentioned in the query as well.
Make sure that filters only refer to attributes that exist in the data source.
Make sure that filters take into account the descriptions of attributes.
Make sure that filters are only used as needed. If there are no filters that should be \
applied return [] for the filter value.\
If the user's query explicitly mentions number of documents to retrieve, set top_k to \
that number, otherwise do not set top_k.
"""
example_info_1 = VectorStoreInfo(
content_info="Lyrics of a song",
metadata_info=[
MetadataInfo(name="artist", type="str", description="Name of the song artist"),
MetadataInfo(
name="genre",
type="str",
description='The song genre, one of "pop", "rock" or "rap"',
),
],
)
example_query_1 = "What are songs by Taylor Swift or Katy Perry about teenage romance in the dance pop genre"
example_output_1 = VectorStoreQuerySpec(
query="what songs are about teenager love",
filters=[
MetadataFilter(key="artist", value="Taylor Swift"),
MetadataFilter(key="artist", value="Katy Perry"),
MetadataFilter(key="genre", value="pop"),
],
)
example_info_2 = VectorStoreInfo(
content_info="Classic literature",
metadata_info=[
MetadataInfo(name="author", type="str", description="Author name"),
MetadataInfo(
name="book_title",
type="str",
description="Book title",
),
MetadataInfo(
name="year",
type="int",
description="Year Published",
),
MetadataInfo(
name="pages",
type="int",
description="Number of pages",
),
MetadataInfo(
name="summary",
type="str",
description="A short summary of the book",
),
],
)
example_query_2 = "What are some books by Jane Austen published after 1813 that explore the theme of marriage for social standing?"
example_output_2 = VectorStoreQuerySpec(
query="What books related to theme of marriage for social standing?",
filters=[
MetadataFilter(key="year", value="1813", operator=FilterOperator.GT),
MetadataFilter(key="author", value="Jane Austen"),
],
)
EXAMPLES = f"""\
<< Example 1. >>
Data Source:
```json
{example_info_1.json(indent=4)}
```
User Query:
{example_query_1}
Structured Request:
```json
{example_output_1.json()}
<< Example 2. >>
Data Source:
```json
{example_info_2.json(indent=4)}
```
User Query:
{example_query_2}
Structured Request:
```json
{example_output_2.json()}
```
""".replace(
"{", "{{"
).replace(
"}", "}}"
)
SUFFIX = """
<< Example 3. >>
Data Source:
```json
{info_str}
```
User Query:
{query_str}
Structured Request:
"""
DEFAULT_VECTARA_QUERY_PROMPT_TMPL = PREFIX + EXAMPLES + SUFFIX
# deprecated, kept for backwards compatibility
"""Vector store query prompt."""
VectorStoreQueryPrompt = PromptTemplate
DEFAULT_VECTARA_QUERY_PROMPT = PromptTemplate(
template=DEFAULT_VECTARA_QUERY_PROMPT_TMPL,
prompt_type=PromptType.VECTOR_STORE_QUERY,
)
| [
"llama_index.legacy.vector_stores.types.MetadataInfo",
"llama_index.legacy.vector_stores.types.MetadataFilter",
"llama_index.legacy.prompts.base.PromptTemplate"
] | [((3927, 4033), 'llama_index.legacy.prompts.base.PromptTemplate', 'PromptTemplate', ([], {'template': 'DEFAULT_VECTARA_QUERY_PROMPT_TMPL', 'prompt_type': 'PromptType.VECTOR_STORE_QUERY'}), '(template=DEFAULT_VECTARA_QUERY_PROMPT_TMPL, prompt_type=\n PromptType.VECTOR_STORE_QUERY)\n', (3941, 4033), False, 'from llama_index.legacy.prompts.base import PromptTemplate\n'), ((1451, 1529), 'llama_index.legacy.vector_stores.types.MetadataInfo', 'MetadataInfo', ([], {'name': '"""artist"""', 'type': '"""str"""', 'description': '"""Name of the song artist"""'}), "(name='artist', type='str', description='Name of the song artist')\n", (1463, 1529), False, 'from llama_index.legacy.vector_stores.types import FilterOperator, MetadataFilter, MetadataInfo, VectorStoreInfo, VectorStoreQuerySpec\n'), ((1539, 1643), 'llama_index.legacy.vector_stores.types.MetadataInfo', 'MetadataInfo', ([], {'name': '"""genre"""', 'type': '"""str"""', 'description': '"""The song genre, one of "pop", "rock" or "rap\\""""'}), '(name=\'genre\', type=\'str\', description=\n \'The song genre, one of "pop", "rock" or "rap"\')\n', (1551, 1643), False, 'from llama_index.legacy.vector_stores.types import FilterOperator, MetadataFilter, MetadataInfo, VectorStoreInfo, VectorStoreQuerySpec\n'), ((1919, 1969), 'llama_index.legacy.vector_stores.types.MetadataFilter', 'MetadataFilter', ([], {'key': '"""artist"""', 'value': '"""Taylor Swift"""'}), "(key='artist', value='Taylor Swift')\n", (1933, 1969), False, 'from llama_index.legacy.vector_stores.types import FilterOperator, MetadataFilter, MetadataInfo, VectorStoreInfo, VectorStoreQuerySpec\n'), ((1979, 2027), 'llama_index.legacy.vector_stores.types.MetadataFilter', 'MetadataFilter', ([], {'key': '"""artist"""', 'value': '"""Katy Perry"""'}), "(key='artist', value='Katy Perry')\n", (1993, 2027), False, 'from llama_index.legacy.vector_stores.types import FilterOperator, MetadataFilter, MetadataInfo, VectorStoreInfo, VectorStoreQuerySpec\n'), ((2037, 2077), 'llama_index.legacy.vector_stores.types.MetadataFilter', 'MetadataFilter', ([], {'key': '"""genre"""', 'value': '"""pop"""'}), "(key='genre', value='pop')\n", (2051, 2077), False, 'from llama_index.legacy.vector_stores.types import FilterOperator, MetadataFilter, MetadataInfo, VectorStoreInfo, VectorStoreQuerySpec\n'), ((2190, 2256), 'llama_index.legacy.vector_stores.types.MetadataInfo', 'MetadataInfo', ([], {'name': '"""author"""', 'type': '"""str"""', 'description': '"""Author name"""'}), "(name='author', type='str', description='Author name')\n", (2202, 2256), False, 'from llama_index.legacy.vector_stores.types import FilterOperator, MetadataFilter, MetadataInfo, VectorStoreInfo, VectorStoreQuerySpec\n'), ((2266, 2335), 'llama_index.legacy.vector_stores.types.MetadataInfo', 'MetadataInfo', ([], {'name': '"""book_title"""', 'type': '"""str"""', 'description': '"""Book title"""'}), "(name='book_title', type='str', description='Book title')\n", (2278, 2335), False, 'from llama_index.legacy.vector_stores.types import FilterOperator, MetadataFilter, MetadataInfo, VectorStoreInfo, VectorStoreQuerySpec\n'), ((2392, 2459), 'llama_index.legacy.vector_stores.types.MetadataInfo', 'MetadataInfo', ([], {'name': '"""year"""', 'type': '"""int"""', 'description': '"""Year Published"""'}), "(name='year', type='int', description='Year Published')\n", (2404, 2459), False, 'from llama_index.legacy.vector_stores.types import FilterOperator, MetadataFilter, MetadataInfo, VectorStoreInfo, VectorStoreQuerySpec\n'), ((2516, 2585), 
'llama_index.legacy.vector_stores.types.MetadataInfo', 'MetadataInfo', ([], {'name': '"""pages"""', 'type': '"""int"""', 'description': '"""Number of pages"""'}), "(name='pages', type='int', description='Number of pages')\n", (2528, 2585), False, 'from llama_index.legacy.vector_stores.types import FilterOperator, MetadataFilter, MetadataInfo, VectorStoreInfo, VectorStoreQuerySpec\n'), ((2642, 2730), 'llama_index.legacy.vector_stores.types.MetadataInfo', 'MetadataInfo', ([], {'name': '"""summary"""', 'type': '"""str"""', 'description': '"""A short summary of the book"""'}), "(name='summary', type='str', description=\n 'A short summary of the book')\n", (2654, 2730), False, 'from llama_index.legacy.vector_stores.types import FilterOperator, MetadataFilter, MetadataInfo, VectorStoreInfo, VectorStoreQuerySpec\n'), ((3054, 3122), 'llama_index.legacy.vector_stores.types.MetadataFilter', 'MetadataFilter', ([], {'key': '"""year"""', 'value': '"""1813"""', 'operator': 'FilterOperator.GT'}), "(key='year', value='1813', operator=FilterOperator.GT)\n", (3068, 3122), False, 'from llama_index.legacy.vector_stores.types import FilterOperator, MetadataFilter, MetadataInfo, VectorStoreInfo, VectorStoreQuerySpec\n'), ((3132, 3181), 'llama_index.legacy.vector_stores.types.MetadataFilter', 'MetadataFilter', ([], {'key': '"""author"""', 'value': '"""Jane Austen"""'}), "(key='author', value='Jane Austen')\n", (3146, 3181), False, 'from llama_index.legacy.vector_stores.types import FilterOperator, MetadataFilter, MetadataInfo, VectorStoreInfo, VectorStoreQuerySpec\n')] |
"""Faithfulness evaluation."""
from __future__ import annotations
from typing import Any, List, Optional, Sequence, Union
from llama_index.core.evaluation.base import BaseEvaluator, EvaluationResult
from llama_index.core.multi_modal_llms.base import MultiModalLLM
from llama_index.core.prompts import BasePromptTemplate, PromptTemplate
from llama_index.core.prompts.mixin import PromptDictType
from llama_index.core.schema import ImageNode
DEFAULT_EVAL_TEMPLATE = PromptTemplate(
"Please tell if a given piece of information "
"is supported by the visual as well as textual context information.\n"
"You need to answer with either YES or NO.\n"
"Answer YES if any of the image(s) and textual context supports the information, even "
"if most of the context is unrelated. "
"Some examples are provided below with only text context, but please do use\n"
"any images for context if they are provided.\n\n"
"Information: Apple pie is generally double-crusted.\n"
"Context: An apple pie is a fruit pie in which the principal filling "
"ingredient is apples. \n"
"Apple pie is often served with whipped cream, ice cream "
"('apple pie à la mode'), custard or cheddar cheese.\n"
"It is generally double-crusted, with pastry both above "
"and below the filling; the upper crust may be solid or "
"latticed (woven of crosswise strips).\n"
"Answer: YES\n"
"Information: Apple pies tastes bad.\n"
"Context: An apple pie is a fruit pie in which the principal filling "
"ingredient is apples. \n"
"Apple pie is often served with whipped cream, ice cream "
"('apple pie à la mode'), custard or cheddar cheese.\n"
"It is generally double-crusted, with pastry both above "
"and below the filling; the upper crust may be solid or "
"latticed (woven of crosswise strips).\n"
"Answer: NO\n"
"Information: {query_str}\n"
"Context: {context_str}\n"
"Answer: "
)
DEFAULT_REFINE_TEMPLATE = PromptTemplate(
"We want to understand if the following information is present "
"in the context information: {query_str}\n"
"We have provided an existing YES/NO answer: {existing_answer}\n"
"We have the opportunity to refine the existing answer "
"(only if needed) with some more context below.\n"
"------------\n"
"{context_msg}\n"
"------------\n"
"If the existing answer was already YES, still answer YES. "
"If the information is present in the new context, answer YES. "
"Otherwise answer NO.\n"
)
class MultiModalFaithfulnessEvaluator(BaseEvaluator):
"""Multi-Modal Faithfulness evaluator.
Evaluates whether a response is faithful to the contexts
(i.e. whether the response is supported by the contexts or hallucinated.)
This evaluator only considers the response string and the list of context strings.
Args:
multi_modal_llm(Optional[MultiModalLLM]):
The Multi-Modal LLM Judge to use for evaluations.
raise_error(bool): Whether to raise an error when the response is invalid.
Defaults to False.
eval_template(Optional[Union[str, BasePromptTemplate]]):
The template to use for evaluation.
refine_template(Optional[Union[str, BasePromptTemplate]]):
The template to use for refining the evaluation.
"""
def __init__(
self,
multi_modal_llm: Optional[MultiModalLLM] = None,
raise_error: bool = False,
eval_template: Union[str, BasePromptTemplate, None] = None,
refine_template: Union[str, BasePromptTemplate, None] = None,
) -> None:
"""Init params."""
if multi_modal_llm is None:
try:
from llama_index.multi_modal_llms.openai import (
OpenAIMultiModal,
) # pants: no-infer-dep
except ImportError:
raise ImportError(
"OpenAIMultiModal is not installed. "
"Please install it using `pip install llama-index-multi-modal-llms-openai`"
)
self._multi_modal_llm = OpenAIMultiModal(
model="gpt-4-vision-preview", max_new_tokens=1000
)
else:
self._multi_modal_llm = multi_modal_llm
self._raise_error = raise_error
self._eval_template: BasePromptTemplate
if isinstance(eval_template, str):
self._eval_template = PromptTemplate(eval_template)
else:
self._eval_template = eval_template or DEFAULT_EVAL_TEMPLATE
self._refine_template: BasePromptTemplate
if isinstance(refine_template, str):
self._refine_template = PromptTemplate(refine_template)
else:
self._refine_template = refine_template or DEFAULT_REFINE_TEMPLATE
def _get_prompts(self) -> PromptDictType:
"""Get prompts."""
return {
"eval_template": self._eval_template,
"refine_template": self._refine_template,
}
def _update_prompts(self, prompts: PromptDictType) -> None:
"""Update prompts."""
if "eval_template" in prompts:
self._eval_template = prompts["eval_template"]
if "refine_template" in prompts:
self._refine_template = prompts["refine_template"]
def evaluate(
self,
query: Union[str, None] = None,
response: Union[str, None] = None,
contexts: Union[Sequence[str], None] = None,
image_paths: Union[List[str], None] = None,
image_urls: Union[List[str], None] = None,
**kwargs: Any,
) -> EvaluationResult:
"""Evaluate whether the response is faithful to the multi-modal contexts."""
del query # Unused
del kwargs # Unused
if contexts is None or response is None:
raise ValueError("contexts and response must be provided")
context_str = "\n\n".join(contexts)
fmt_prompt = self._eval_template.format(
context_str=context_str, query_str=response
)
        # Default to an empty image list so the completion call below does not raise a
        # NameError when neither image paths nor image URLs are provided.
        image_nodes: List[ImageNode] = []
        if image_paths:
image_nodes = [
ImageNode(image_path=image_path) for image_path in image_paths
]
if image_urls:
image_nodes = [ImageNode(image_url=image_url) for image_url in image_urls]
response_obj = self._multi_modal_llm.complete(
prompt=fmt_prompt,
image_documents=image_nodes,
)
raw_response_txt = str(response_obj)
if "yes" in raw_response_txt.lower():
passing = True
else:
passing = False
if self._raise_error:
raise ValueError("The response is invalid")
return EvaluationResult(
response=response,
contexts=contexts,
passing=passing,
score=1.0 if passing else 0.0,
feedback=raw_response_txt,
)
async def aevaluate(
self,
query: Union[str, None] = None,
response: Union[str, None] = None,
contexts: Union[Sequence[str], None] = None,
image_paths: Union[List[str], None] = None,
image_urls: Union[List[str], None] = None,
**kwargs: Any,
) -> EvaluationResult:
"""Async evaluate whether the response is faithful to the multi-modal contexts."""
del query # Unused
del kwargs # Unused
if contexts is None or response is None:
raise ValueError("contexts and response must be provided")
context_str = "\n\n".join(contexts)
fmt_prompt = self._eval_template.format(
context_str=context_str, query_str=response
)
        # Default to an empty image list so the completion call below does not raise a
        # NameError when neither image paths nor image URLs are provided.
        image_nodes: List[ImageNode] = []
        if image_paths:
image_nodes = [
ImageNode(image_path=image_path) for image_path in image_paths
]
if image_urls:
image_nodes = [ImageNode(image_url=image_url) for image_url in image_urls]
response_obj = await self._multi_modal_llm.acomplete(
prompt=fmt_prompt,
image_documents=image_nodes,
)
raw_response_txt = str(response_obj)
if "yes" in raw_response_txt.lower():
passing = True
else:
passing = False
if self._raise_error:
raise ValueError("The response is invalid")
return EvaluationResult(
response=response,
contexts=contexts,
passing=passing,
score=1.0 if passing else 0.0,
feedback=raw_response_txt,
)
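# A minimal usage sketch: assumes a configured OpenAI API key for the default GPT-4V
# judge; "apple_pie.jpg" is a hypothetical local image used only for illustration.
if __name__ == "__main__":
    evaluator = MultiModalFaithfulnessEvaluator(raise_error=False)
    result = evaluator.evaluate(
        response="Apple pie is generally double-crusted.",
        contexts=["An apple pie is a fruit pie whose principal filling is apples."],
        image_paths=["apple_pie.jpg"],  # hypothetical example image
    )
    # EvaluationResult carries a boolean verdict, a 0/1 score and the raw judge output.
    print(result.passing, result.score, result.feedback)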
| [
"llama_index.core.prompts.PromptTemplate",
"llama_index.core.evaluation.base.EvaluationResult",
"llama_index.core.schema.ImageNode",
"llama_index.multi_modal_llms.openai.OpenAIMultiModal"
] | [((468, 1757), 'llama_index.core.prompts.PromptTemplate', 'PromptTemplate', (['"""Please tell if a given piece of information is supported by the visual as well as textual context information.\nYou need to answer with either YES or NO.\nAnswer YES if any of the image(s) and textual context supports the information, even if most of the context is unrelated. Some examples are provided below with only text context, but please do use\nany images for context if they are provided.\n\nInformation: Apple pie is generally double-crusted.\nContext: An apple pie is a fruit pie in which the principal filling ingredient is apples. \nApple pie is often served with whipped cream, ice cream (\'apple pie à la mode\'), custard or cheddar cheese.\nIt is generally double-crusted, with pastry both above and below the filling; the upper crust may be solid or latticed (woven of crosswise strips).\nAnswer: YES\nInformation: Apple pies tastes bad.\nContext: An apple pie is a fruit pie in which the principal filling ingredient is apples. \nApple pie is often served with whipped cream, ice cream (\'apple pie à la mode\'), custard or cheddar cheese.\nIt is generally double-crusted, with pastry both above and below the filling; the upper crust may be solid or latticed (woven of crosswise strips).\nAnswer: NO\nInformation: {query_str}\nContext: {context_str}\nAnswer: """'], {}), '(\n """Please tell if a given piece of information is supported by the visual as well as textual context information.\nYou need to answer with either YES or NO.\nAnswer YES if any of the image(s) and textual context supports the information, even if most of the context is unrelated. Some examples are provided below with only text context, but please do use\nany images for context if they are provided.\n\nInformation: Apple pie is generally double-crusted.\nContext: An apple pie is a fruit pie in which the principal filling ingredient is apples. \nApple pie is often served with whipped cream, ice cream (\'apple pie à la mode\'), custard or cheddar cheese.\nIt is generally double-crusted, with pastry both above and below the filling; the upper crust may be solid or latticed (woven of crosswise strips).\nAnswer: YES\nInformation: Apple pies tastes bad.\nContext: An apple pie is a fruit pie in which the principal filling ingredient is apples. \nApple pie is often served with whipped cream, ice cream (\'apple pie à la mode\'), custard or cheddar cheese.\nIt is generally double-crusted, with pastry both above and below the filling; the upper crust may be solid or latticed (woven of crosswise strips).\nAnswer: NO\nInformation: {query_str}\nContext: {context_str}\nAnswer: """\n )\n', (482, 1757), False, 'from llama_index.core.prompts import BasePromptTemplate, PromptTemplate\n'), ((1983, 2461), 'llama_index.core.prompts.PromptTemplate', 'PromptTemplate', (['"""We want to understand if the following information is present in the context information: {query_str}\nWe have provided an existing YES/NO answer: {existing_answer}\nWe have the opportunity to refine the existing answer (only if needed) with some more context below.\n------------\n{context_msg}\n------------\nIf the existing answer was already YES, still answer YES. If the information is present in the new context, answer YES. 
Otherwise answer NO.\n"""'], {}), '(\n """We want to understand if the following information is present in the context information: {query_str}\nWe have provided an existing YES/NO answer: {existing_answer}\nWe have the opportunity to refine the existing answer (only if needed) with some more context below.\n------------\n{context_msg}\n------------\nIf the existing answer was already YES, still answer YES. If the information is present in the new context, answer YES. Otherwise answer NO.\n"""\n )\n', (1997, 2461), False, 'from llama_index.core.prompts import BasePromptTemplate, PromptTemplate\n'), ((6740, 6873), 'llama_index.core.evaluation.base.EvaluationResult', 'EvaluationResult', ([], {'response': 'response', 'contexts': 'contexts', 'passing': 'passing', 'score': '(1.0 if passing else 0.0)', 'feedback': 'raw_response_txt'}), '(response=response, contexts=contexts, passing=passing,\n score=1.0 if passing else 0.0, feedback=raw_response_txt)\n', (6756, 6873), False, 'from llama_index.core.evaluation.base import BaseEvaluator, EvaluationResult\n'), ((8371, 8504), 'llama_index.core.evaluation.base.EvaluationResult', 'EvaluationResult', ([], {'response': 'response', 'contexts': 'contexts', 'passing': 'passing', 'score': '(1.0 if passing else 0.0)', 'feedback': 'raw_response_txt'}), '(response=response, contexts=contexts, passing=passing,\n score=1.0 if passing else 0.0, feedback=raw_response_txt)\n', (8387, 8504), False, 'from llama_index.core.evaluation.base import BaseEvaluator, EvaluationResult\n'), ((4123, 4190), 'llama_index.multi_modal_llms.openai.OpenAIMultiModal', 'OpenAIMultiModal', ([], {'model': '"""gpt-4-vision-preview"""', 'max_new_tokens': '(1000)'}), "(model='gpt-4-vision-preview', max_new_tokens=1000)\n", (4139, 4190), False, 'from llama_index.multi_modal_llms.openai import OpenAIMultiModal\n'), ((4454, 4483), 'llama_index.core.prompts.PromptTemplate', 'PromptTemplate', (['eval_template'], {}), '(eval_template)\n', (4468, 4483), False, 'from llama_index.core.prompts import BasePromptTemplate, PromptTemplate\n'), ((4703, 4734), 'llama_index.core.prompts.PromptTemplate', 'PromptTemplate', (['refine_template'], {}), '(refine_template)\n', (4717, 4734), False, 'from llama_index.core.prompts import BasePromptTemplate, PromptTemplate\n'), ((6143, 6175), 'llama_index.core.schema.ImageNode', 'ImageNode', ([], {'image_path': 'image_path'}), '(image_path=image_path)\n', (6152, 6175), False, 'from llama_index.core.schema import ImageNode\n'), ((6270, 6300), 'llama_index.core.schema.ImageNode', 'ImageNode', ([], {'image_url': 'image_url'}), '(image_url=image_url)\n', (6279, 6300), False, 'from llama_index.core.schema import ImageNode\n'), ((7767, 7799), 'llama_index.core.schema.ImageNode', 'ImageNode', ([], {'image_path': 'image_path'}), '(image_path=image_path)\n', (7776, 7799), False, 'from llama_index.core.schema import ImageNode\n'), ((7894, 7924), 'llama_index.core.schema.ImageNode', 'ImageNode', ([], {'image_url': 'image_url'}), '(image_url=image_url)\n', (7903, 7924), False, 'from llama_index.core.schema import ImageNode\n')] |
"""Faithfulness evaluation."""
from __future__ import annotations
from typing import Any, List, Optional, Sequence, Union
from llama_index.core.evaluation.base import BaseEvaluator, EvaluationResult
from llama_index.core.multi_modal_llms.base import MultiModalLLM
from llama_index.core.prompts import BasePromptTemplate, PromptTemplate
from llama_index.core.prompts.mixin import PromptDictType
from llama_index.core.schema import ImageNode
DEFAULT_EVAL_TEMPLATE = PromptTemplate(
"Please tell if a given piece of information "
"is supported by the visual as well as textual context information.\n"
"You need to answer with either YES or NO.\n"
"Answer YES if any of the image(s) and textual context supports the information, even "
"if most of the context is unrelated. "
"Some examples are provided below with only text context, but please do use\n"
"any images for context if they are provided.\n\n"
"Information: Apple pie is generally double-crusted.\n"
"Context: An apple pie is a fruit pie in which the principal filling "
"ingredient is apples. \n"
"Apple pie is often served with whipped cream, ice cream "
"('apple pie à la mode'), custard or cheddar cheese.\n"
"It is generally double-crusted, with pastry both above "
"and below the filling; the upper crust may be solid or "
"latticed (woven of crosswise strips).\n"
"Answer: YES\n"
"Information: Apple pies tastes bad.\n"
"Context: An apple pie is a fruit pie in which the principal filling "
"ingredient is apples. \n"
"Apple pie is often served with whipped cream, ice cream "
"('apple pie à la mode'), custard or cheddar cheese.\n"
"It is generally double-crusted, with pastry both above "
"and below the filling; the upper crust may be solid or "
"latticed (woven of crosswise strips).\n"
"Answer: NO\n"
"Information: {query_str}\n"
"Context: {context_str}\n"
"Answer: "
)
DEFAULT_REFINE_TEMPLATE = PromptTemplate(
"We want to understand if the following information is present "
"in the context information: {query_str}\n"
"We have provided an existing YES/NO answer: {existing_answer}\n"
"We have the opportunity to refine the existing answer "
"(only if needed) with some more context below.\n"
"------------\n"
"{context_msg}\n"
"------------\n"
"If the existing answer was already YES, still answer YES. "
"If the information is present in the new context, answer YES. "
"Otherwise answer NO.\n"
)
class MultiModalFaithfulnessEvaluator(BaseEvaluator):
"""Multi-Modal Faithfulness evaluator.
Evaluates whether a response is faithful to the contexts
(i.e. whether the response is supported by the contexts or hallucinated.)
This evaluator only considers the response string and the list of context strings.
Args:
multi_modal_llm(Optional[MultiModalLLM]):
The Multi-Modal LLM Judge to use for evaluations.
raise_error(bool): Whether to raise an error when the response is invalid.
Defaults to False.
eval_template(Optional[Union[str, BasePromptTemplate]]):
The template to use for evaluation.
refine_template(Optional[Union[str, BasePromptTemplate]]):
The template to use for refining the evaluation.
"""
def __init__(
self,
multi_modal_llm: Optional[MultiModalLLM] = None,
raise_error: bool = False,
eval_template: Union[str, BasePromptTemplate, None] = None,
refine_template: Union[str, BasePromptTemplate, None] = None,
) -> None:
"""Init params."""
if multi_modal_llm is None:
try:
from llama_index.multi_modal_llms.openai import (
OpenAIMultiModal,
) # pants: no-infer-dep
except ImportError:
raise ImportError(
"OpenAIMultiModal is not installed. "
"Please install it using `pip install llama-index-multi-modal-llms-openai`"
)
self._multi_modal_llm = OpenAIMultiModal(
model="gpt-4-vision-preview", max_new_tokens=1000
)
else:
self._multi_modal_llm = multi_modal_llm
self._raise_error = raise_error
self._eval_template: BasePromptTemplate
if isinstance(eval_template, str):
self._eval_template = PromptTemplate(eval_template)
else:
self._eval_template = eval_template or DEFAULT_EVAL_TEMPLATE
self._refine_template: BasePromptTemplate
if isinstance(refine_template, str):
self._refine_template = PromptTemplate(refine_template)
else:
self._refine_template = refine_template or DEFAULT_REFINE_TEMPLATE
def _get_prompts(self) -> PromptDictType:
"""Get prompts."""
return {
"eval_template": self._eval_template,
"refine_template": self._refine_template,
}
def _update_prompts(self, prompts: PromptDictType) -> None:
"""Update prompts."""
if "eval_template" in prompts:
self._eval_template = prompts["eval_template"]
if "refine_template" in prompts:
self._refine_template = prompts["refine_template"]
def evaluate(
self,
query: Union[str, None] = None,
response: Union[str, None] = None,
contexts: Union[Sequence[str], None] = None,
image_paths: Union[List[str], None] = None,
image_urls: Union[List[str], None] = None,
**kwargs: Any,
) -> EvaluationResult:
"""Evaluate whether the response is faithful to the multi-modal contexts."""
del query # Unused
del kwargs # Unused
if contexts is None or response is None:
raise ValueError("contexts and response must be provided")
context_str = "\n\n".join(contexts)
fmt_prompt = self._eval_template.format(
context_str=context_str, query_str=response
)
if image_paths:
image_nodes = [
ImageNode(image_path=image_path) for image_path in image_paths
]
if image_urls:
image_nodes = [ImageNode(image_url=image_url) for image_url in image_urls]
response_obj = self._multi_modal_llm.complete(
prompt=fmt_prompt,
image_documents=image_nodes,
)
raw_response_txt = str(response_obj)
if "yes" in raw_response_txt.lower():
passing = True
else:
passing = False
if self._raise_error:
raise ValueError("The response is invalid")
return EvaluationResult(
response=response,
contexts=contexts,
passing=passing,
score=1.0 if passing else 0.0,
feedback=raw_response_txt,
)
async def aevaluate(
self,
query: Union[str, None] = None,
response: Union[str, None] = None,
contexts: Union[Sequence[str], None] = None,
image_paths: Union[List[str], None] = None,
image_urls: Union[List[str], None] = None,
**kwargs: Any,
) -> EvaluationResult:
"""Async evaluate whether the response is faithful to the multi-modal contexts."""
del query # Unused
del kwargs # Unused
if contexts is None or response is None:
raise ValueError("contexts and response must be provided")
context_str = "\n\n".join(contexts)
fmt_prompt = self._eval_template.format(
context_str=context_str, query_str=response
)
if image_paths:
image_nodes = [
ImageNode(image_path=image_path) for image_path in image_paths
]
if image_urls:
image_nodes = [ImageNode(image_url=image_url) for image_url in image_urls]
response_obj = await self._multi_modal_llm.acomplete(
prompt=fmt_prompt,
image_documents=image_nodes,
)
raw_response_txt = str(response_obj)
if "yes" in raw_response_txt.lower():
passing = True
else:
passing = False
if self._raise_error:
raise ValueError("The response is invalid")
return EvaluationResult(
response=response,
contexts=contexts,
passing=passing,
score=1.0 if passing else 0.0,
feedback=raw_response_txt,
)
| [
"llama_index.core.prompts.PromptTemplate",
"llama_index.core.evaluation.base.EvaluationResult",
"llama_index.core.schema.ImageNode",
"llama_index.multi_modal_llms.openai.OpenAIMultiModal"
] | [((468, 1757), 'llama_index.core.prompts.PromptTemplate', 'PromptTemplate', (['"""Please tell if a given piece of information is supported by the visual as well as textual context information.\nYou need to answer with either YES or NO.\nAnswer YES if any of the image(s) and textual context supports the information, even if most of the context is unrelated. Some examples are provided below with only text context, but please do use\nany images for context if they are provided.\n\nInformation: Apple pie is generally double-crusted.\nContext: An apple pie is a fruit pie in which the principal filling ingredient is apples. \nApple pie is often served with whipped cream, ice cream (\'apple pie à la mode\'), custard or cheddar cheese.\nIt is generally double-crusted, with pastry both above and below the filling; the upper crust may be solid or latticed (woven of crosswise strips).\nAnswer: YES\nInformation: Apple pies tastes bad.\nContext: An apple pie is a fruit pie in which the principal filling ingredient is apples. \nApple pie is often served with whipped cream, ice cream (\'apple pie à la mode\'), custard or cheddar cheese.\nIt is generally double-crusted, with pastry both above and below the filling; the upper crust may be solid or latticed (woven of crosswise strips).\nAnswer: NO\nInformation: {query_str}\nContext: {context_str}\nAnswer: """'], {}), '(\n """Please tell if a given piece of information is supported by the visual as well as textual context information.\nYou need to answer with either YES or NO.\nAnswer YES if any of the image(s) and textual context supports the information, even if most of the context is unrelated. Some examples are provided below with only text context, but please do use\nany images for context if they are provided.\n\nInformation: Apple pie is generally double-crusted.\nContext: An apple pie is a fruit pie in which the principal filling ingredient is apples. \nApple pie is often served with whipped cream, ice cream (\'apple pie à la mode\'), custard or cheddar cheese.\nIt is generally double-crusted, with pastry both above and below the filling; the upper crust may be solid or latticed (woven of crosswise strips).\nAnswer: YES\nInformation: Apple pies tastes bad.\nContext: An apple pie is a fruit pie in which the principal filling ingredient is apples. \nApple pie is often served with whipped cream, ice cream (\'apple pie à la mode\'), custard or cheddar cheese.\nIt is generally double-crusted, with pastry both above and below the filling; the upper crust may be solid or latticed (woven of crosswise strips).\nAnswer: NO\nInformation: {query_str}\nContext: {context_str}\nAnswer: """\n )\n', (482, 1757), False, 'from llama_index.core.prompts import BasePromptTemplate, PromptTemplate\n'), ((1983, 2461), 'llama_index.core.prompts.PromptTemplate', 'PromptTemplate', (['"""We want to understand if the following information is present in the context information: {query_str}\nWe have provided an existing YES/NO answer: {existing_answer}\nWe have the opportunity to refine the existing answer (only if needed) with some more context below.\n------------\n{context_msg}\n------------\nIf the existing answer was already YES, still answer YES. If the information is present in the new context, answer YES. 
Otherwise answer NO.\n"""'], {}), '(\n """We want to understand if the following information is present in the context information: {query_str}\nWe have provided an existing YES/NO answer: {existing_answer}\nWe have the opportunity to refine the existing answer (only if needed) with some more context below.\n------------\n{context_msg}\n------------\nIf the existing answer was already YES, still answer YES. If the information is present in the new context, answer YES. Otherwise answer NO.\n"""\n )\n', (1997, 2461), False, 'from llama_index.core.prompts import BasePromptTemplate, PromptTemplate\n'), ((6740, 6873), 'llama_index.core.evaluation.base.EvaluationResult', 'EvaluationResult', ([], {'response': 'response', 'contexts': 'contexts', 'passing': 'passing', 'score': '(1.0 if passing else 0.0)', 'feedback': 'raw_response_txt'}), '(response=response, contexts=contexts, passing=passing,\n score=1.0 if passing else 0.0, feedback=raw_response_txt)\n', (6756, 6873), False, 'from llama_index.core.evaluation.base import BaseEvaluator, EvaluationResult\n'), ((8371, 8504), 'llama_index.core.evaluation.base.EvaluationResult', 'EvaluationResult', ([], {'response': 'response', 'contexts': 'contexts', 'passing': 'passing', 'score': '(1.0 if passing else 0.0)', 'feedback': 'raw_response_txt'}), '(response=response, contexts=contexts, passing=passing,\n score=1.0 if passing else 0.0, feedback=raw_response_txt)\n', (8387, 8504), False, 'from llama_index.core.evaluation.base import BaseEvaluator, EvaluationResult\n'), ((4123, 4190), 'llama_index.multi_modal_llms.openai.OpenAIMultiModal', 'OpenAIMultiModal', ([], {'model': '"""gpt-4-vision-preview"""', 'max_new_tokens': '(1000)'}), "(model='gpt-4-vision-preview', max_new_tokens=1000)\n", (4139, 4190), False, 'from llama_index.multi_modal_llms.openai import OpenAIMultiModal\n'), ((4454, 4483), 'llama_index.core.prompts.PromptTemplate', 'PromptTemplate', (['eval_template'], {}), '(eval_template)\n', (4468, 4483), False, 'from llama_index.core.prompts import BasePromptTemplate, PromptTemplate\n'), ((4703, 4734), 'llama_index.core.prompts.PromptTemplate', 'PromptTemplate', (['refine_template'], {}), '(refine_template)\n', (4717, 4734), False, 'from llama_index.core.prompts import BasePromptTemplate, PromptTemplate\n'), ((6143, 6175), 'llama_index.core.schema.ImageNode', 'ImageNode', ([], {'image_path': 'image_path'}), '(image_path=image_path)\n', (6152, 6175), False, 'from llama_index.core.schema import ImageNode\n'), ((6270, 6300), 'llama_index.core.schema.ImageNode', 'ImageNode', ([], {'image_url': 'image_url'}), '(image_url=image_url)\n', (6279, 6300), False, 'from llama_index.core.schema import ImageNode\n'), ((7767, 7799), 'llama_index.core.schema.ImageNode', 'ImageNode', ([], {'image_path': 'image_path'}), '(image_path=image_path)\n', (7776, 7799), False, 'from llama_index.core.schema import ImageNode\n'), ((7894, 7924), 'llama_index.core.schema.ImageNode', 'ImageNode', ([], {'image_url': 'image_url'}), '(image_url=image_url)\n', (7903, 7924), False, 'from llama_index.core.schema import ImageNode\n')] |
"""Faithfulness evaluation."""
from __future__ import annotations
from typing import Any, List, Optional, Sequence, Union
from llama_index.core.evaluation.base import BaseEvaluator, EvaluationResult
from llama_index.core.multi_modal_llms.base import MultiModalLLM
from llama_index.core.prompts import BasePromptTemplate, PromptTemplate
from llama_index.core.prompts.mixin import PromptDictType
from llama_index.core.schema import ImageNode
DEFAULT_EVAL_TEMPLATE = PromptTemplate(
"Please tell if a given piece of information "
"is supported by the visual as well as textual context information.\n"
"You need to answer with either YES or NO.\n"
"Answer YES if any of the image(s) and textual context supports the information, even "
"if most of the context is unrelated. "
"Some examples are provided below with only text context, but please do use\n"
"any images for context if they are provided.\n\n"
"Information: Apple pie is generally double-crusted.\n"
"Context: An apple pie is a fruit pie in which the principal filling "
"ingredient is apples. \n"
"Apple pie is often served with whipped cream, ice cream "
"('apple pie à la mode'), custard or cheddar cheese.\n"
"It is generally double-crusted, with pastry both above "
"and below the filling; the upper crust may be solid or "
"latticed (woven of crosswise strips).\n"
"Answer: YES\n"
"Information: Apple pies tastes bad.\n"
"Context: An apple pie is a fruit pie in which the principal filling "
"ingredient is apples. \n"
"Apple pie is often served with whipped cream, ice cream "
"('apple pie à la mode'), custard or cheddar cheese.\n"
"It is generally double-crusted, with pastry both above "
"and below the filling; the upper crust may be solid or "
"latticed (woven of crosswise strips).\n"
"Answer: NO\n"
"Information: {query_str}\n"
"Context: {context_str}\n"
"Answer: "
)
DEFAULT_REFINE_TEMPLATE = PromptTemplate(
"We want to understand if the following information is present "
"in the context information: {query_str}\n"
"We have provided an existing YES/NO answer: {existing_answer}\n"
"We have the opportunity to refine the existing answer "
"(only if needed) with some more context below.\n"
"------------\n"
"{context_msg}\n"
"------------\n"
"If the existing answer was already YES, still answer YES. "
"If the information is present in the new context, answer YES. "
"Otherwise answer NO.\n"
)
class MultiModalFaithfulnessEvaluator(BaseEvaluator):
"""Multi-Modal Faithfulness evaluator.
Evaluates whether a response is faithful to the contexts
(i.e. whether the response is supported by the contexts or hallucinated.)
This evaluator only considers the response string and the list of context strings.
Args:
multi_modal_llm(Optional[MultiModalLLM]):
The Multi-Modal LLM Judge to use for evaluations.
raise_error(bool): Whether to raise an error when the response is invalid.
Defaults to False.
eval_template(Optional[Union[str, BasePromptTemplate]]):
The template to use for evaluation.
refine_template(Optional[Union[str, BasePromptTemplate]]):
The template to use for refining the evaluation.
"""
def __init__(
self,
multi_modal_llm: Optional[MultiModalLLM] = None,
raise_error: bool = False,
eval_template: Union[str, BasePromptTemplate, None] = None,
refine_template: Union[str, BasePromptTemplate, None] = None,
) -> None:
"""Init params."""
if multi_modal_llm is None:
try:
from llama_index.multi_modal_llms.openai import (
OpenAIMultiModal,
) # pants: no-infer-dep
except ImportError:
raise ImportError(
"OpenAIMultiModal is not installed. "
"Please install it using `pip install llama-index-multi-modal-llms-openai`"
)
self._multi_modal_llm = OpenAIMultiModal(
model="gpt-4-vision-preview", max_new_tokens=1000
)
else:
self._multi_modal_llm = multi_modal_llm
self._raise_error = raise_error
self._eval_template: BasePromptTemplate
if isinstance(eval_template, str):
self._eval_template = PromptTemplate(eval_template)
else:
self._eval_template = eval_template or DEFAULT_EVAL_TEMPLATE
self._refine_template: BasePromptTemplate
if isinstance(refine_template, str):
self._refine_template = PromptTemplate(refine_template)
else:
self._refine_template = refine_template or DEFAULT_REFINE_TEMPLATE
def _get_prompts(self) -> PromptDictType:
"""Get prompts."""
return {
"eval_template": self._eval_template,
"refine_template": self._refine_template,
}
def _update_prompts(self, prompts: PromptDictType) -> None:
"""Update prompts."""
if "eval_template" in prompts:
self._eval_template = prompts["eval_template"]
if "refine_template" in prompts:
self._refine_template = prompts["refine_template"]
def evaluate(
self,
query: Union[str, None] = None,
response: Union[str, None] = None,
contexts: Union[Sequence[str], None] = None,
image_paths: Union[List[str], None] = None,
image_urls: Union[List[str], None] = None,
**kwargs: Any,
) -> EvaluationResult:
"""Evaluate whether the response is faithful to the multi-modal contexts."""
del query # Unused
del kwargs # Unused
if contexts is None or response is None:
raise ValueError("contexts and response must be provided")
context_str = "\n\n".join(contexts)
fmt_prompt = self._eval_template.format(
context_str=context_str, query_str=response
)
if image_paths:
image_nodes = [
ImageNode(image_path=image_path) for image_path in image_paths
]
if image_urls:
image_nodes = [ImageNode(image_url=image_url) for image_url in image_urls]
response_obj = self._multi_modal_llm.complete(
prompt=fmt_prompt,
image_documents=image_nodes,
)
raw_response_txt = str(response_obj)
if "yes" in raw_response_txt.lower():
passing = True
else:
passing = False
if self._raise_error:
raise ValueError("The response is invalid")
return EvaluationResult(
response=response,
contexts=contexts,
passing=passing,
score=1.0 if passing else 0.0,
feedback=raw_response_txt,
)
async def aevaluate(
self,
query: Union[str, None] = None,
response: Union[str, None] = None,
contexts: Union[Sequence[str], None] = None,
image_paths: Union[List[str], None] = None,
image_urls: Union[List[str], None] = None,
**kwargs: Any,
) -> EvaluationResult:
"""Async evaluate whether the response is faithful to the multi-modal contexts."""
del query # Unused
del kwargs # Unused
if contexts is None or response is None:
raise ValueError("contexts and response must be provided")
context_str = "\n\n".join(contexts)
fmt_prompt = self._eval_template.format(
context_str=context_str, query_str=response
)
if image_paths:
image_nodes = [
ImageNode(image_path=image_path) for image_path in image_paths
]
if image_urls:
image_nodes = [ImageNode(image_url=image_url) for image_url in image_urls]
response_obj = await self._multi_modal_llm.acomplete(
prompt=fmt_prompt,
image_documents=image_nodes,
)
raw_response_txt = str(response_obj)
if "yes" in raw_response_txt.lower():
passing = True
else:
passing = False
if self._raise_error:
raise ValueError("The response is invalid")
return EvaluationResult(
response=response,
contexts=contexts,
passing=passing,
score=1.0 if passing else 0.0,
feedback=raw_response_txt,
)
| [
"llama_index.core.prompts.PromptTemplate",
"llama_index.core.evaluation.base.EvaluationResult",
"llama_index.core.schema.ImageNode",
"llama_index.multi_modal_llms.openai.OpenAIMultiModal"
] | [((468, 1757), 'llama_index.core.prompts.PromptTemplate', 'PromptTemplate', (['"""Please tell if a given piece of information is supported by the visual as well as textual context information.\nYou need to answer with either YES or NO.\nAnswer YES if any of the image(s) and textual context supports the information, even if most of the context is unrelated. Some examples are provided below with only text context, but please do use\nany images for context if they are provided.\n\nInformation: Apple pie is generally double-crusted.\nContext: An apple pie is a fruit pie in which the principal filling ingredient is apples. \nApple pie is often served with whipped cream, ice cream (\'apple pie à la mode\'), custard or cheddar cheese.\nIt is generally double-crusted, with pastry both above and below the filling; the upper crust may be solid or latticed (woven of crosswise strips).\nAnswer: YES\nInformation: Apple pies tastes bad.\nContext: An apple pie is a fruit pie in which the principal filling ingredient is apples. \nApple pie is often served with whipped cream, ice cream (\'apple pie à la mode\'), custard or cheddar cheese.\nIt is generally double-crusted, with pastry both above and below the filling; the upper crust may be solid or latticed (woven of crosswise strips).\nAnswer: NO\nInformation: {query_str}\nContext: {context_str}\nAnswer: """'], {}), '(\n """Please tell if a given piece of information is supported by the visual as well as textual context information.\nYou need to answer with either YES or NO.\nAnswer YES if any of the image(s) and textual context supports the information, even if most of the context is unrelated. Some examples are provided below with only text context, but please do use\nany images for context if they are provided.\n\nInformation: Apple pie is generally double-crusted.\nContext: An apple pie is a fruit pie in which the principal filling ingredient is apples. \nApple pie is often served with whipped cream, ice cream (\'apple pie à la mode\'), custard or cheddar cheese.\nIt is generally double-crusted, with pastry both above and below the filling; the upper crust may be solid or latticed (woven of crosswise strips).\nAnswer: YES\nInformation: Apple pies tastes bad.\nContext: An apple pie is a fruit pie in which the principal filling ingredient is apples. \nApple pie is often served with whipped cream, ice cream (\'apple pie à la mode\'), custard or cheddar cheese.\nIt is generally double-crusted, with pastry both above and below the filling; the upper crust may be solid or latticed (woven of crosswise strips).\nAnswer: NO\nInformation: {query_str}\nContext: {context_str}\nAnswer: """\n )\n', (482, 1757), False, 'from llama_index.core.prompts import BasePromptTemplate, PromptTemplate\n'), ((1983, 2461), 'llama_index.core.prompts.PromptTemplate', 'PromptTemplate', (['"""We want to understand if the following information is present in the context information: {query_str}\nWe have provided an existing YES/NO answer: {existing_answer}\nWe have the opportunity to refine the existing answer (only if needed) with some more context below.\n------------\n{context_msg}\n------------\nIf the existing answer was already YES, still answer YES. If the information is present in the new context, answer YES. 
Otherwise answer NO.\n"""'], {}), '(\n """We want to understand if the following information is present in the context information: {query_str}\nWe have provided an existing YES/NO answer: {existing_answer}\nWe have the opportunity to refine the existing answer (only if needed) with some more context below.\n------------\n{context_msg}\n------------\nIf the existing answer was already YES, still answer YES. If the information is present in the new context, answer YES. Otherwise answer NO.\n"""\n )\n', (1997, 2461), False, 'from llama_index.core.prompts import BasePromptTemplate, PromptTemplate\n'), ((6740, 6873), 'llama_index.core.evaluation.base.EvaluationResult', 'EvaluationResult', ([], {'response': 'response', 'contexts': 'contexts', 'passing': 'passing', 'score': '(1.0 if passing else 0.0)', 'feedback': 'raw_response_txt'}), '(response=response, contexts=contexts, passing=passing,\n score=1.0 if passing else 0.0, feedback=raw_response_txt)\n', (6756, 6873), False, 'from llama_index.core.evaluation.base import BaseEvaluator, EvaluationResult\n'), ((8371, 8504), 'llama_index.core.evaluation.base.EvaluationResult', 'EvaluationResult', ([], {'response': 'response', 'contexts': 'contexts', 'passing': 'passing', 'score': '(1.0 if passing else 0.0)', 'feedback': 'raw_response_txt'}), '(response=response, contexts=contexts, passing=passing,\n score=1.0 if passing else 0.0, feedback=raw_response_txt)\n', (8387, 8504), False, 'from llama_index.core.evaluation.base import BaseEvaluator, EvaluationResult\n'), ((4123, 4190), 'llama_index.multi_modal_llms.openai.OpenAIMultiModal', 'OpenAIMultiModal', ([], {'model': '"""gpt-4-vision-preview"""', 'max_new_tokens': '(1000)'}), "(model='gpt-4-vision-preview', max_new_tokens=1000)\n", (4139, 4190), False, 'from llama_index.multi_modal_llms.openai import OpenAIMultiModal\n'), ((4454, 4483), 'llama_index.core.prompts.PromptTemplate', 'PromptTemplate', (['eval_template'], {}), '(eval_template)\n', (4468, 4483), False, 'from llama_index.core.prompts import BasePromptTemplate, PromptTemplate\n'), ((4703, 4734), 'llama_index.core.prompts.PromptTemplate', 'PromptTemplate', (['refine_template'], {}), '(refine_template)\n', (4717, 4734), False, 'from llama_index.core.prompts import BasePromptTemplate, PromptTemplate\n'), ((6143, 6175), 'llama_index.core.schema.ImageNode', 'ImageNode', ([], {'image_path': 'image_path'}), '(image_path=image_path)\n', (6152, 6175), False, 'from llama_index.core.schema import ImageNode\n'), ((6270, 6300), 'llama_index.core.schema.ImageNode', 'ImageNode', ([], {'image_url': 'image_url'}), '(image_url=image_url)\n', (6279, 6300), False, 'from llama_index.core.schema import ImageNode\n'), ((7767, 7799), 'llama_index.core.schema.ImageNode', 'ImageNode', ([], {'image_path': 'image_path'}), '(image_path=image_path)\n', (7776, 7799), False, 'from llama_index.core.schema import ImageNode\n'), ((7894, 7924), 'llama_index.core.schema.ImageNode', 'ImageNode', ([], {'image_url': 'image_url'}), '(image_url=image_url)\n', (7903, 7924), False, 'from llama_index.core.schema import ImageNode\n')] |
"""RAG LLM."""
from typing import Any
from llama_index.core.constants import DEFAULT_CONTEXT_WINDOW
from llama_index.core.llms import (
CompletionResponse,
CompletionResponseGen,
CustomLLM,
LLMMetadata,
)
from llama_index.core.llms.callbacks import llm_completion_callback
from pydantic import Field
from metagpt.config2 import config
from metagpt.llm import LLM
from metagpt.provider.base_llm import BaseLLM
from metagpt.utils.async_helper import run_coroutine_in_new_loop
from metagpt.utils.token_counter import TOKEN_MAX
class RAGLLM(CustomLLM):
"""LlamaIndex's LLM is different from MetaGPT's LLM.
Inherit CustomLLM from llamaindex, making MetaGPT's LLM can be used by LlamaIndex.
"""
model_infer: BaseLLM = Field(..., description="The MetaGPT's LLM.")
context_window: int = TOKEN_MAX.get(config.llm.model, DEFAULT_CONTEXT_WINDOW)
num_output: int = config.llm.max_token
model_name: str = config.llm.model
@property
def metadata(self) -> LLMMetadata:
"""Get LLM metadata."""
return LLMMetadata(context_window=self.context_window, num_output=self.num_output, model_name=self.model_name)
@llm_completion_callback()
def complete(self, prompt: str, **kwargs: Any) -> CompletionResponse:
return run_coroutine_in_new_loop(self.acomplete(prompt, **kwargs))
@llm_completion_callback()
async def acomplete(self, prompt: str, formatted: bool = False, **kwargs: Any) -> CompletionResponse:
text = await self.model_infer.aask(msg=prompt, stream=False)
return CompletionResponse(text=text)
@llm_completion_callback()
def stream_complete(self, prompt: str, **kwargs: Any) -> CompletionResponseGen:
...
def get_rag_llm(model_infer: BaseLLM = None) -> RAGLLM:
"""Get llm that can be used by LlamaIndex."""
return RAGLLM(model_infer=model_infer or LLM())
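# A minimal usage sketch: assumes MetaGPT's config2 is set up with a working LLM
# provider; the wrapper can then be handed to any LlamaIndex component that expects
# an LLM, or called directly as below.
if __name__ == "__main__":
    rag_llm = get_rag_llm()  # wraps the default metagpt.llm.LLM()
    print(rag_llm.complete("Summarize RAG in one sentence.").text)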
| [
"llama_index.core.llms.CompletionResponse",
"llama_index.core.llms.callbacks.llm_completion_callback",
"llama_index.core.llms.LLMMetadata"
] | [((751, 795), 'pydantic.Field', 'Field', (['...'], {'description': '"""The MetaGPT\'s LLM."""'}), '(..., description="The MetaGPT\'s LLM.")\n', (756, 795), False, 'from pydantic import Field\n'), ((822, 877), 'metagpt.utils.token_counter.TOKEN_MAX.get', 'TOKEN_MAX.get', (['config.llm.model', 'DEFAULT_CONTEXT_WINDOW'], {}), '(config.llm.model, DEFAULT_CONTEXT_WINDOW)\n', (835, 877), False, 'from metagpt.utils.token_counter import TOKEN_MAX\n'), ((1171, 1196), 'llama_index.core.llms.callbacks.llm_completion_callback', 'llm_completion_callback', ([], {}), '()\n', (1194, 1196), False, 'from llama_index.core.llms.callbacks import llm_completion_callback\n'), ((1352, 1377), 'llama_index.core.llms.callbacks.llm_completion_callback', 'llm_completion_callback', ([], {}), '()\n', (1375, 1377), False, 'from llama_index.core.llms.callbacks import llm_completion_callback\n'), ((1604, 1629), 'llama_index.core.llms.callbacks.llm_completion_callback', 'llm_completion_callback', ([], {}), '()\n', (1627, 1629), False, 'from llama_index.core.llms.callbacks import llm_completion_callback\n'), ((1061, 1168), 'llama_index.core.llms.LLMMetadata', 'LLMMetadata', ([], {'context_window': 'self.context_window', 'num_output': 'self.num_output', 'model_name': 'self.model_name'}), '(context_window=self.context_window, num_output=self.num_output,\n model_name=self.model_name)\n', (1072, 1168), False, 'from llama_index.core.llms import CompletionResponse, CompletionResponseGen, CustomLLM, LLMMetadata\n'), ((1568, 1597), 'llama_index.core.llms.CompletionResponse', 'CompletionResponse', ([], {'text': 'text'}), '(text=text)\n', (1586, 1597), False, 'from llama_index.core.llms import CompletionResponse, CompletionResponseGen, CustomLLM, LLMMetadata\n'), ((1879, 1884), 'metagpt.llm.LLM', 'LLM', ([], {}), '()\n', (1882, 1884), False, 'from metagpt.llm import LLM\n')] |
import os
from configparser import ConfigParser, SectionProxy
from typing import Any, Type
from llama_index import (
LLMPredictor,
ServiceContext,
VectorStoreIndex,
)
from llama_index.embeddings.base import BaseEmbedding
from llama_index.embeddings.openai import OpenAIEmbedding
from llama_index.indices import SimpleKeywordTableIndex
from llama_index.indices.base import BaseIndex
from llama_index.indices.loading import load_index_from_storage
from llama_index.llm_predictor import StructuredLLMPredictor
from llama_index.llms.llm import LLM
from llama_index.llms.openai import OpenAI
from llama_index.storage.storage_context import StorageContext
CONFIG_FILE_NAME = "config.ini"
DEFAULT_PERSIST_DIR = "./storage"
DEFAULT_CONFIG = {
"store": {"persist_dir": DEFAULT_PERSIST_DIR},
"index": {"type": "default"},
"embed_model": {"type": "default"},
"llm_predictor": {"type": "default"},
}
def load_config(root: str = ".") -> ConfigParser:
"""Load configuration from file."""
config = ConfigParser()
config.read_dict(DEFAULT_CONFIG)
config.read(os.path.join(root, CONFIG_FILE_NAME))
return config
def save_config(config: ConfigParser, root: str = ".") -> None:
"""Load configuration to file."""
with open(os.path.join(root, CONFIG_FILE_NAME), "w") as fd:
config.write(fd)
def load_index(root: str = ".") -> BaseIndex[Any]:
"""Load existing index file."""
config = load_config(root)
service_context = _load_service_context(config)
# Index type
index_type: Type
if config["index"]["type"] == "default" or config["index"]["type"] == "vector":
index_type = VectorStoreIndex
elif config["index"]["type"] == "keyword":
index_type = SimpleKeywordTableIndex
else:
raise KeyError(f"Unknown index.type {config['index']['type']}")
try:
# try loading index
storage_context = _load_storage_context(config)
index = load_index_from_storage(storage_context)
except ValueError:
# build index
storage_context = StorageContext.from_defaults()
index = index_type(
nodes=[], service_context=service_context, storage_context=storage_context
)
return index
def save_index(index: BaseIndex[Any], root: str = ".") -> None:
"""Save index to file."""
config = load_config(root)
persist_dir = config["store"]["persist_dir"]
index.storage_context.persist(persist_dir=persist_dir)
def _load_service_context(config: ConfigParser) -> ServiceContext:
"""Internal function to load service context based on configuration."""
embed_model = _load_embed_model(config)
llm_predictor = _load_llm_predictor(config)
return ServiceContext.from_defaults(
llm_predictor=llm_predictor, embed_model=embed_model
)
def _load_storage_context(config: ConfigParser) -> StorageContext:
persist_dir = config["store"]["persist_dir"]
return StorageContext.from_defaults(persist_dir=persist_dir)
def _load_llm_predictor(config: ConfigParser) -> LLMPredictor:
"""Internal function to load LLM predictor based on configuration."""
model_type = config["llm_predictor"]["type"].lower()
if model_type == "default":
llm = _load_llm(config["llm_predictor"])
return LLMPredictor(llm=llm)
elif model_type == "structured":
llm = _load_llm(config["llm_predictor"])
return StructuredLLMPredictor(llm=llm)
else:
raise KeyError("llm_predictor.type")
def _load_llm(section: SectionProxy) -> LLM:
if "engine" in section:
return OpenAI(engine=section["engine"])
else:
return OpenAI()
def _load_embed_model(config: ConfigParser) -> BaseEmbedding:
"""Internal function to load embedding model based on configuration."""
model_type = config["embed_model"]["type"]
if model_type == "default":
return OpenAIEmbedding()
else:
raise KeyError("embed_model.type")
| [
"llama_index.storage.storage_context.StorageContext.from_defaults",
"llama_index.llm_predictor.StructuredLLMPredictor",
"llama_index.llms.openai.OpenAI",
"llama_index.LLMPredictor",
"llama_index.ServiceContext.from_defaults",
"llama_index.indices.loading.load_index_from_storage",
"llama_index.embeddings.openai.OpenAIEmbedding"
] | [((1023, 1037), 'configparser.ConfigParser', 'ConfigParser', ([], {}), '()\n', (1035, 1037), False, 'from configparser import ConfigParser, SectionProxy\n'), ((2725, 2812), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'llm_predictor': 'llm_predictor', 'embed_model': 'embed_model'}), '(llm_predictor=llm_predictor, embed_model=\n embed_model)\n', (2753, 2812), False, 'from llama_index import LLMPredictor, ServiceContext, VectorStoreIndex\n'), ((2951, 3004), 'llama_index.storage.storage_context.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {'persist_dir': 'persist_dir'}), '(persist_dir=persist_dir)\n', (2979, 3004), False, 'from llama_index.storage.storage_context import StorageContext\n'), ((1091, 1127), 'os.path.join', 'os.path.join', (['root', 'CONFIG_FILE_NAME'], {}), '(root, CONFIG_FILE_NAME)\n', (1103, 1127), False, 'import os\n'), ((1957, 1997), 'llama_index.indices.loading.load_index_from_storage', 'load_index_from_storage', (['storage_context'], {}), '(storage_context)\n', (1980, 1997), False, 'from llama_index.indices.loading import load_index_from_storage\n'), ((3297, 3318), 'llama_index.LLMPredictor', 'LLMPredictor', ([], {'llm': 'llm'}), '(llm=llm)\n', (3309, 3318), False, 'from llama_index import LLMPredictor, ServiceContext, VectorStoreIndex\n'), ((3597, 3629), 'llama_index.llms.openai.OpenAI', 'OpenAI', ([], {'engine': "section['engine']"}), "(engine=section['engine'])\n", (3603, 3629), False, 'from llama_index.llms.openai import OpenAI\n'), ((3655, 3663), 'llama_index.llms.openai.OpenAI', 'OpenAI', ([], {}), '()\n', (3661, 3663), False, 'from llama_index.llms.openai import OpenAI\n'), ((3898, 3915), 'llama_index.embeddings.openai.OpenAIEmbedding', 'OpenAIEmbedding', ([], {}), '()\n', (3913, 3915), False, 'from llama_index.embeddings.openai import OpenAIEmbedding\n'), ((1265, 1301), 'os.path.join', 'os.path.join', (['root', 'CONFIG_FILE_NAME'], {}), '(root, CONFIG_FILE_NAME)\n', (1277, 1301), False, 'import os\n'), ((2069, 2099), 'llama_index.storage.storage_context.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {}), '()\n', (2097, 2099), False, 'from llama_index.storage.storage_context import StorageContext\n'), ((3420, 3451), 'llama_index.llm_predictor.StructuredLLMPredictor', 'StructuredLLMPredictor', ([], {'llm': 'llm'}), '(llm=llm)\n', (3442, 3451), False, 'from llama_index.llm_predictor import StructuredLLMPredictor\n')] |
import logging
import tempfile
from pathlib import Path
from typing import AnyStr, BinaryIO
from injector import inject, singleton
from llama_index.core.node_parser import SentenceWindowNodeParser
from llama_index.core.storage import StorageContext
from private_gpt.components.embedding.embedding_component import EmbeddingComponent
from private_gpt.components.ingest.ingest_component import get_ingestion_component
from private_gpt.components.llm.llm_component import LLMComponent
from private_gpt.components.node_store.node_store_component import NodeStoreComponent
from private_gpt.components.vector_store.vector_store_component import (
VectorStoreComponent,
)
from private_gpt.server.ingest.model import IngestedDoc
from private_gpt.settings.settings import settings
logger = logging.getLogger(__name__)
@singleton
class IngestService:
@inject
def __init__(
self,
llm_component: LLMComponent,
vector_store_component: VectorStoreComponent,
embedding_component: EmbeddingComponent,
node_store_component: NodeStoreComponent,
) -> None:
self.llm_service = llm_component
self.storage_context = StorageContext.from_defaults(
vector_store=vector_store_component.vector_store,
docstore=node_store_component.doc_store,
index_store=node_store_component.index_store,
)
node_parser = SentenceWindowNodeParser.from_defaults()
self.ingest_component = get_ingestion_component(
self.storage_context,
embed_model=embedding_component.embedding_model,
transformations=[node_parser, embedding_component.embedding_model],
settings=settings(),
)
def _ingest_data(self, file_name: str, file_data: AnyStr) -> list[IngestedDoc]:
logger.debug("Got file data of size=%s to ingest", len(file_data))
# llama-index mainly supports reading from files, so
# we have to create a tmp file to read for it to work
# delete=False to avoid a Windows 11 permission error.
with tempfile.NamedTemporaryFile(delete=False) as tmp:
try:
path_to_tmp = Path(tmp.name)
if isinstance(file_data, bytes):
path_to_tmp.write_bytes(file_data)
else:
path_to_tmp.write_text(str(file_data))
return self.ingest_file(file_name, path_to_tmp)
finally:
tmp.close()
path_to_tmp.unlink()
def ingest_file(self, file_name: str, file_data: Path) -> list[IngestedDoc]:
logger.info("Ingesting file_name=%s", file_name)
documents = self.ingest_component.ingest(file_name, file_data)
logger.info("Finished ingestion file_name=%s", file_name)
return [IngestedDoc.from_document(document) for document in documents]
def ingest_text(self, file_name: str, text: str) -> list[IngestedDoc]:
logger.debug("Ingesting text data with file_name=%s", file_name)
return self._ingest_data(file_name, text)
def ingest_bin_data(
self, file_name: str, raw_file_data: BinaryIO
) -> list[IngestedDoc]:
logger.debug("Ingesting binary data with file_name=%s", file_name)
file_data = raw_file_data.read()
return self._ingest_data(file_name, file_data)
def bulk_ingest(self, files: list[tuple[str, Path]]) -> list[IngestedDoc]:
logger.info("Ingesting file_names=%s", [f[0] for f in files])
documents = self.ingest_component.bulk_ingest(files)
logger.info("Finished ingestion file_name=%s", [f[0] for f in files])
return [IngestedDoc.from_document(document) for document in documents]
def list_ingested(self) -> list[IngestedDoc]:
ingested_docs = []
try:
docstore = self.storage_context.docstore
ingested_docs_ids: set[str] = set()
for node in docstore.docs.values():
if node.ref_doc_id is not None:
ingested_docs_ids.add(node.ref_doc_id)
for doc_id in ingested_docs_ids:
ref_doc_info = docstore.get_ref_doc_info(ref_doc_id=doc_id)
doc_metadata = None
if ref_doc_info is not None and ref_doc_info.metadata is not None:
doc_metadata = IngestedDoc.curate_metadata(ref_doc_info.metadata)
ingested_docs.append(
IngestedDoc(
object="ingest.document",
doc_id=doc_id,
doc_metadata=doc_metadata,
)
)
except ValueError:
logger.warning("Got an exception when getting list of docs", exc_info=True)
pass
logger.debug("Found count=%s ingested documents", len(ingested_docs))
return ingested_docs
def delete(self, doc_id: str) -> None:
"""Delete an ingested document.
:raises ValueError: if the document does not exist
"""
logger.info(
"Deleting the ingested document=%s in the doc and index store", doc_id
)
self.ingest_component.delete(doc_id)
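# A minimal usage sketch (kept as comments, since the service is wired through the
# injector rather than constructed by hand). `global_injector` and the file name are
# assumptions for illustration only:
#
#   from private_gpt.di import global_injector
#
#   ingest_service = global_injector.get(IngestService)
#   docs = ingest_service.ingest_text("note.txt", "PrivateGPT keeps all data local.")
#   print([d.doc_id for d in docs], len(ingest_service.list_ingested()))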
| [
"llama_index.core.storage.StorageContext.from_defaults",
"llama_index.core.node_parser.SentenceWindowNodeParser.from_defaults"
] | [((788, 815), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (805, 815), False, 'import logging\n'), ((1171, 1346), 'llama_index.core.storage.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {'vector_store': 'vector_store_component.vector_store', 'docstore': 'node_store_component.doc_store', 'index_store': 'node_store_component.index_store'}), '(vector_store=vector_store_component.\n vector_store, docstore=node_store_component.doc_store, index_store=\n node_store_component.index_store)\n', (1199, 1346), False, 'from llama_index.core.storage import StorageContext\n'), ((1406, 1446), 'llama_index.core.node_parser.SentenceWindowNodeParser.from_defaults', 'SentenceWindowNodeParser.from_defaults', ([], {}), '()\n', (1444, 1446), False, 'from llama_index.core.node_parser import SentenceWindowNodeParser\n'), ((2082, 2123), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', ([], {'delete': '(False)'}), '(delete=False)\n', (2109, 2123), False, 'import tempfile\n'), ((2821, 2856), 'private_gpt.server.ingest.model.IngestedDoc.from_document', 'IngestedDoc.from_document', (['document'], {}), '(document)\n', (2846, 2856), False, 'from private_gpt.server.ingest.model import IngestedDoc\n'), ((3667, 3702), 'private_gpt.server.ingest.model.IngestedDoc.from_document', 'IngestedDoc.from_document', (['document'], {}), '(document)\n', (3692, 3702), False, 'from private_gpt.server.ingest.model import IngestedDoc\n'), ((1701, 1711), 'private_gpt.settings.settings.settings', 'settings', ([], {}), '()\n', (1709, 1711), False, 'from private_gpt.settings.settings import settings\n'), ((2179, 2193), 'pathlib.Path', 'Path', (['tmp.name'], {}), '(tmp.name)\n', (2183, 2193), False, 'from pathlib import Path\n'), ((4354, 4404), 'private_gpt.server.ingest.model.IngestedDoc.curate_metadata', 'IngestedDoc.curate_metadata', (['ref_doc_info.metadata'], {}), '(ref_doc_info.metadata)\n', (4381, 4404), False, 'from private_gpt.server.ingest.model import IngestedDoc\n'), ((4463, 4542), 'private_gpt.server.ingest.model.IngestedDoc', 'IngestedDoc', ([], {'object': '"""ingest.document"""', 'doc_id': 'doc_id', 'doc_metadata': 'doc_metadata'}), "(object='ingest.document', doc_id=doc_id, doc_metadata=doc_metadata)\n", (4474, 4542), False, 'from private_gpt.server.ingest.model import IngestedDoc\n')] |
import logging
from llama_index.prompts.prompts import QuestionAnswerPrompt
QUESTION_ANSWER_PROMPT_TMPL_CN = (
"上下文信息如下所示: \n"
"---------------------\n"
"{context_str}"
"\n---------------------\n"
"我的问题是:{query_str}\n"
)
QUESTION_ANSWER_PROMPT_TMPL_EN = (
"Context information is below. \n"
"---------------------\n"
"{context_str}"
"\n---------------------\n"
"My question is {query_str}\n"
)
def get_prompt_template(language='zh'):
if language == 'en':
logging.info('=====> Use English prompt template!')
return QuestionAnswerPrompt(QUESTION_ANSWER_PROMPT_TMPL_EN)
else:
logging.info('=====> Use Chinese prompt template!')
        return QuestionAnswerPrompt(QUESTION_ANSWER_PROMPT_TMPL_CN)
 | [
"llama_index.prompts.prompts.QuestionAnswerPrompt"
] | [((510, 561), 'logging.info', 'logging.info', (['"""=====> Use English prompt template!"""'], {}), "('=====> Use English prompt template!')\n", (522, 561), False, 'import logging\n'), ((577, 629), 'llama_index.prompts.prompts.QuestionAnswerPrompt', 'QuestionAnswerPrompt', (['QUESTION_ANSWER_PROMPT_TMPL_EN'], {}), '(QUESTION_ANSWER_PROMPT_TMPL_EN)\n', (597, 629), False, 'from llama_index.prompts.prompts import QuestionAnswerPrompt\n'), ((648, 699), 'logging.info', 'logging.info', (['"""=====> Use Chinese prompt template!"""'], {}), "('=====> Use Chinese prompt template!')\n", (660, 699), False, 'import logging\n'), ((715, 767), 'llama_index.prompts.prompts.QuestionAnswerPrompt', 'QuestionAnswerPrompt', (['QUESTION_ANSWER_PROMPT_TMPL_CN'], {}), '(QUESTION_ANSWER_PROMPT_TMPL_CN)\n', (735, 767), False, 'from llama_index.prompts.prompts import QuestionAnswerPrompt\n')] |
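A short usage sketch for the template selector above; the module name is hypothetical, and the `.format` call assumes the legacy llama_index Prompt interface.
# Assumes the snippet above is saved as prompt_templates.py (hypothetical name).
from prompt_templates import get_prompt_template

qa_prompt = get_prompt_template(language="en")   # any other value falls back to the Chinese template
text = qa_prompt.format(                         # .format is assumed from the legacy Prompt API
    context_str="LlamaIndex is a data framework for LLM applications.",
    query_str="What is LlamaIndex?",
)
print(text)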
from llama_index.core import Document
from llama_index.core.node_parser import CodeSplitter
from llama_index.retrievers.bm25 import BM25Retriever
from llama_index.core import VectorStoreIndex
from llama_index.core.query_engine import RetrieverQueryEngine
from llama_index.core import get_response_synthesizer
from llama_index.core import PromptTemplate
from .defaults import DefaultLLM, DefaultEmbedder
from .default_prompt import DEFAULT_PROMPT
MAX_CHARS = 1500
K = 3
class ActionEngine:
def __init__(self, llm=None, embedding=None):
if (llm is None):
llm = DefaultLLM()
if (embedding is None):
embedding = DefaultEmbedder()
self.llm = llm
self.embedding = embedding
def _get_index(self, html):
text_list = [html]
documents = [Document(text=t) for t in text_list]
splitter = CodeSplitter(
language="html",
chunk_lines=40, # lines per chunk
chunk_lines_overlap=200, # lines overlap between chunks
max_chars=MAX_CHARS, # max chars per chunk
)
nodes = splitter.get_nodes_from_documents(documents)
nodes = [node for node in nodes if node.text]
index = VectorStoreIndex(nodes, embed_model=self.embedding)
return index
def get_query_engine(self, state):
html = state
index = self._get_index(html)
retriever = BM25Retriever.from_defaults(
index=index,
similarity_top_k=K,
)
response_synthesizer = get_response_synthesizer(streaming=True, llm=self.llm)
# assemble query engine
query_engine = RetrieverQueryEngine(
retriever=retriever,
response_synthesizer=response_synthesizer,
)
prompt_template = PromptTemplate(DEFAULT_PROMPT)
query_engine.update_prompts(
{"response_synthesizer:text_qa_template": prompt_template}
)
return query_engine
| [
"llama_index.core.node_parser.CodeSplitter",
"llama_index.core.query_engine.RetrieverQueryEngine",
"llama_index.retrievers.bm25.BM25Retriever.from_defaults",
"llama_index.core.VectorStoreIndex",
"llama_index.core.PromptTemplate",
"llama_index.core.Document",
"llama_index.core.get_response_synthesizer"
] | [((869, 964), 'llama_index.core.node_parser.CodeSplitter', 'CodeSplitter', ([], {'language': '"""html"""', 'chunk_lines': '(40)', 'chunk_lines_overlap': '(200)', 'max_chars': 'MAX_CHARS'}), "(language='html', chunk_lines=40, chunk_lines_overlap=200,\n max_chars=MAX_CHARS)\n", (881, 964), False, 'from llama_index.core.node_parser import CodeSplitter\n'), ((1226, 1277), 'llama_index.core.VectorStoreIndex', 'VectorStoreIndex', (['nodes'], {'embed_model': 'self.embedding'}), '(nodes, embed_model=self.embedding)\n', (1242, 1277), False, 'from llama_index.core import VectorStoreIndex\n'), ((1420, 1480), 'llama_index.retrievers.bm25.BM25Retriever.from_defaults', 'BM25Retriever.from_defaults', ([], {'index': 'index', 'similarity_top_k': 'K'}), '(index=index, similarity_top_k=K)\n', (1447, 1480), False, 'from llama_index.retrievers.bm25 import BM25Retriever\n'), ((1548, 1602), 'llama_index.core.get_response_synthesizer', 'get_response_synthesizer', ([], {'streaming': '(True)', 'llm': 'self.llm'}), '(streaming=True, llm=self.llm)\n', (1572, 1602), False, 'from llama_index.core import get_response_synthesizer\n'), ((1659, 1748), 'llama_index.core.query_engine.RetrieverQueryEngine', 'RetrieverQueryEngine', ([], {'retriever': 'retriever', 'response_synthesizer': 'response_synthesizer'}), '(retriever=retriever, response_synthesizer=\n response_synthesizer)\n', (1679, 1748), False, 'from llama_index.core.query_engine import RetrieverQueryEngine\n'), ((1806, 1836), 'llama_index.core.PromptTemplate', 'PromptTemplate', (['DEFAULT_PROMPT'], {}), '(DEFAULT_PROMPT)\n', (1820, 1836), False, 'from llama_index.core import PromptTemplate\n'), ((812, 828), 'llama_index.core.Document', 'Document', ([], {'text': 't'}), '(text=t)\n', (820, 828), False, 'from llama_index.core import Document\n')] |
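A hedged driver for the ActionEngine above; the import path, HTML, and instruction are placeholders, and the streaming loop assumes the usual llama_index StreamingResponse interface.
# Hypothetical wiring; `action_engine` is an assumed module name for the class above.
from action_engine import ActionEngine

html = "<html><body><button id='submit'>Send</button></body></html>"   # placeholder page
engine = ActionEngine()                       # defaults to DefaultLLM / DefaultEmbedder
query_engine = engine.get_query_engine(html)
response = query_engine.query("Click the send button")
for token in response.response_gen:            # streaming=True above, so tokens arrive incrementally
    print(token, end="")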
import io
from pathlib import Path
from typing import List
import requests
## TODO: Make this token-aware.
def convert_messages_to_str(messages: List[dict]) -> str:
"""convert messages in OpenAI API format into a single string.
Args:
messages List[dict]: A list of dictionary in the following format:
```
[
{"role": "user", "content": "Hello there."},
{"role": "assistant", "content": "Hi, I'm Claude. How can I help you?"},
{"role": "user", "content": "Can you explain LLMs in plain English?"},
]
```
"""
final_str = ""
for msg in messages:
if "role" in msg and "content" in msg:
role = msg.get("role", "")
content = msg.get("content", "")
final_str += f"\n\n{role}: {content}"
return final_str
def format_assistant_message(raw_text: str) -> dict:
return {"role": "assistant", "content": raw_text}
def load_documents(file_path: str = None, folder_path: str = None, url: str = None) -> List[any]:
"""Loads documents from a local directory or public url or local filename.
Args:
file_path (str): The path to the filename.
folder_path (str): The path to the folder.
url (str): The url to the file.
"""
#check import packages
try:
from llama_index.core import Document, SimpleDirectoryReader
from llama_index.core.readers.download import download_loader
except ImportError:
raise ImportError("Could not import llama index package. "
"Please install it with `pip install llama-index-core==0.10.1`.")
#document loaders for filepath
if file_path:
if file_path.endswith(".pdf"):
PDFReader = download_loader("PDFReader")
loader = PDFReader()
documents = loader.load_data(file=Path(file_path))
elif file_path.endswith(".docx"):
docReader = download_loader("DocxReader")
loader = docReader()
documents = loader.load_data(file=Path(file_path))
elif file_path.endswith(".txt"):
with open(file_path, 'r') as file:
text_content = file.read()
documents = [Document(text=text_content)]
else:
raise ValueError("Only .pdf, .docx, and .txt files are supported.")
#document loaders for folderpath
if folder_path:
documents = SimpleDirectoryReader(
input_dir=Path(folder_path), required_exts=[".pdf", ".docx"]).load_data()
#document loaders for url
if url:
response = requests.get(url)
if response.status_code != 200:
raise ValueError(f"Invalid url {url}.")
#for text files
try:
documents = [Document(text=response.content)]
#for pdf files
except Exception:
#check import packages
try:
from pypdf import PdfReader
except ImportError:
raise ImportError("Could not import pypdf package. "
"Please install it with `pip install pypdf==3.17.4`.")
documents = []
pdf_file = PdfReader(io.BytesIO(response.content))
num_pages = len(pdf_file.pages)
for page in range(num_pages):
page_text = pdf_file.pages[page].extract_text()
documents.append(Document(text=page_text))
return documents
def split_document(text: str, chunk_size: int, chunk_overlap: int, **kwargs) -> List[str]:
"""Splits a document into chunks of text.
Args:
text (str): The text to split.
chunk_size (int): The size of each chunk.
chunk_overlap (int): The amount of overlap between each chunk.
**kwargs: Additional keyword arguments for the SentenceSplitter.
"""
#check import packages
try:
from llama_index.core.node_parser.text import SentenceSplitter
except ImportError:
raise ImportError("Could not import llama index package. "
"Please install it with `pip install llama-index-core==0.10.1`.")
#document
text_parser = SentenceSplitter(chunk_size=chunk_size, chunk_overlap=chunk_overlap, **kwargs)
text_chunks = text_parser.split_text(text)
return text_chunks
| [
"llama_index.core.node_parser.text.SentenceSplitter",
"llama_index.core.readers.download.download_loader",
"llama_index.core.Document"
] | [((3916, 3994), 'llama_index.core.node_parser.text.SentenceSplitter', 'SentenceSplitter', ([], {'chunk_size': 'chunk_size', 'chunk_overlap': 'chunk_overlap'}), '(chunk_size=chunk_size, chunk_overlap=chunk_overlap, **kwargs)\n', (3932, 3994), False, 'from llama_index.core.node_parser.text import SentenceSplitter\n'), ((2438, 2455), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (2450, 2455), False, 'import requests\n'), ((1681, 1709), 'llama_index.core.readers.download.download_loader', 'download_loader', (['"""PDFReader"""'], {}), "('PDFReader')\n", (1696, 1709), False, 'from llama_index.core.readers.download import download_loader\n'), ((1850, 1879), 'llama_index.core.readers.download.download_loader', 'download_loader', (['"""DocxReader"""'], {}), "('DocxReader')\n", (1865, 1879), False, 'from llama_index.core.readers.download import download_loader\n'), ((2586, 2617), 'llama_index.core.Document', 'Document', ([], {'text': 'response.content'}), '(text=response.content)\n', (2594, 2617), False, 'from llama_index.core import Document, SimpleDirectoryReader\n'), ((1777, 1792), 'pathlib.Path', 'Path', (['file_path'], {}), '(file_path)\n', (1781, 1792), False, 'from pathlib import Path\n'), ((2952, 2980), 'io.BytesIO', 'io.BytesIO', (['response.content'], {}), '(response.content)\n', (2962, 2980), False, 'import io\n'), ((1947, 1962), 'pathlib.Path', 'Path', (['file_path'], {}), '(file_path)\n', (1951, 1962), False, 'from pathlib import Path\n'), ((2096, 2123), 'llama_index.core.Document', 'Document', ([], {'text': 'text_content'}), '(text=text_content)\n', (2104, 2123), False, 'from llama_index.core import Document, SimpleDirectoryReader\n'), ((2320, 2337), 'pathlib.Path', 'Path', (['folder_path'], {}), '(folder_path)\n', (2324, 2337), False, 'from pathlib import Path\n'), ((3137, 3161), 'llama_index.core.Document', 'Document', ([], {'text': 'page_text'}), '(text=page_text)\n', (3145, 3161), False, 'from llama_index.core import Document, SimpleDirectoryReader\n')] |
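A small sketch chaining the loader and splitter helpers above; the file path is a placeholder, and the helpers are assumed to be in scope (e.g. the script is appended to the same module).
# Placeholder path; requires llama-index-core plus a real PDF on disk.
docs = load_documents(file_path="./example.pdf")
chunks = []
for doc in docs:
    chunks.extend(split_document(doc.text, chunk_size=512, chunk_overlap=64))
print(f"Loaded {len(docs)} document(s) and produced {len(chunks)} chunks")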
from components.store import get_storage_context
from llama_index import VectorStoreIndex
from llama_index.retrievers import (
VectorIndexRetriever,
)
from models.gpts import get_gpts_by_uuids
def search_gpts(question):
storage_context = get_storage_context()
index = VectorStoreIndex.from_documents([], storage_context=storage_context)
retriever = VectorIndexRetriever(index=index, similarity_top_k=10)
nodes = retriever.retrieve(question)
uuids = []
uuids_with_scores = {}
gpts = []
for node in nodes:
print("node metadata", node.metadata)
if node.score > 0.80:
uuid = node.metadata['uuid']
uuids.append(uuid)
uuids_with_scores[uuid] = node.score
if len(uuids) == 0:
return gpts
rows = get_gpts_by_uuids(uuids)
for row in rows:
gpts.append({
"uuid": row.uuid,
"name": row.name,
"description": row.description,
"avatar_url": row.avatar_url,
"author_name": row.author_name,
"created_at": row.created_at,
"updated_at": row.updated_at,
"visit_url": "https://chat.openai.com/g/" + row.short_url,
"score": uuids_with_scores[row.uuid],
})
sorted_gpts = sorted(gpts, key=lambda x: x['score'], reverse=True)
return sorted_gpts
| [
"llama_index.VectorStoreIndex.from_documents",
"llama_index.retrievers.VectorIndexRetriever"
] | [((248, 269), 'components.store.get_storage_context', 'get_storage_context', ([], {}), '()\n', (267, 269), False, 'from components.store import get_storage_context\n'), ((282, 350), 'llama_index.VectorStoreIndex.from_documents', 'VectorStoreIndex.from_documents', (['[]'], {'storage_context': 'storage_context'}), '([], storage_context=storage_context)\n', (313, 350), False, 'from llama_index import VectorStoreIndex\n'), ((368, 422), 'llama_index.retrievers.VectorIndexRetriever', 'VectorIndexRetriever', ([], {'index': 'index', 'similarity_top_k': '(10)'}), '(index=index, similarity_top_k=10)\n', (388, 422), False, 'from llama_index.retrievers import VectorIndexRetriever\n'), ((799, 823), 'models.gpts.get_gpts_by_uuids', 'get_gpts_by_uuids', (['uuids'], {}), '(uuids)\n', (816, 823), False, 'from models.gpts import get_gpts_by_uuids\n')] |
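A hedged call into the search helper above; it assumes the storage context behind get_storage_context is already populated with GPT metadata and that the 0.80 score threshold admits at least one hit.
# Assumes the module above is importable and its vector store is already populated.
results = search_gpts("an assistant that reviews pull requests")
for gpt in results[:3]:
    print(f"{gpt['score']:.2f}  {gpt['name']}  ->  {gpt['visit_url']}")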
import datetime
import io
import json
import os
import sys
import tempfile
import traceback
from typing import Optional, Dict, Any
import aiohttp
import re
import discord
import openai
from bs4 import BeautifulSoup
from discord.ext import pages
from langchain.utilities import (
GoogleSearchAPIWrapper,
)
from langchain.utilities import WolframAlphaAPIWrapper
from langchain.agents import (
Tool,
initialize_agent,
AgentType,
)
from langchain.chat_models import ChatOpenAI
from langchain.memory import (
ConversationSummaryBufferMemory,
)
from langchain.prompts import (
MessagesPlaceholder,
)
from langchain.requests import Requests
from langchain.schema import SystemMessage
from llama_index import (
GPTVectorStoreIndex,
Document,
SimpleDirectoryReader,
ServiceContext,
OpenAIEmbedding,
)
from llama_index.response_synthesizers import get_response_synthesizer, ResponseMode
from llama_index.retrievers import VectorIndexRetriever
from llama_index.query_engine import RetrieverQueryEngine
from llama_index.prompts.chat_prompts import CHAT_REFINE_PROMPT
from pydantic import Extra, BaseModel
import tiktoken
from models.embed_statics_model import EmbedStatics
from models.search_model import Search
from services.deletion_service import Deletion
from services.environment_service import EnvService
from services.moderations_service import Moderation
from services.text_service import TextService
from models.openai_model import Models
from utils.safe_ctx_respond import safe_ctx_respond, safe_remove_list
from contextlib import redirect_stdout
from langchain.agents.conversational_chat.output_parser import ConvoOutputParser
original_parse = ConvoOutputParser.parse
def my_parse(self, text):
    # Strip plain ``` ... ``` pairs from the input while keeping the text between them intact;
    # fenced ```json blocks are deliberately left alone (the negative lookahead below skips them).
text_without_triple_backticks = re.sub(
r"```(?!json)(.*?)```", r"\1", text, flags=re.DOTALL
)
# Call the original parse() method with the modified input
try:
result = original_parse(self, text_without_triple_backticks)
except Exception:
traceback.print_exc()
# Take the text and format it like
# {
# "action": "Final Answer",
# "action_input": text
# }
# This will cause the bot to respond with the text as if it were a final answer.
if "action_input" not in text_without_triple_backticks:
text_without_triple_backticks = f'{{"action": "Final Answer", "action_input": {json.dumps(text_without_triple_backticks)}}}'
result = original_parse(self, text_without_triple_backticks)
else:
# Insert "```json" before the opening curly brace
text_without_triple_backticks = re.sub(
r"({)", r"```json \1", text_without_triple_backticks
)
# Insert "```" after the closing curly brace
text_without_triple_backticks = re.sub(
r"(})", r"\1 ```", text_without_triple_backticks
)
result = original_parse(self, text_without_triple_backticks)
return result
# Replace the original parse function with the new one
ConvoOutputParser.parse = my_parse
class CaptureStdout:
def __enter__(self):
self.buffer = io.StringIO()
self.original_stdout = sys.stdout
sys.stdout = self.buffer
return self.buffer
def __exit__(self, exc_type, exc_val, exc_tb):
sys.stdout = self.original_stdout
async def capture_stdout(func, *args, **kwargs):
with CaptureStdout() as buffer:
result = await func(*args, **kwargs)
captured_output = buffer.getvalue()
return result, captured_output
ALLOWED_GUILDS = EnvService.get_allowed_guilds()
USER_INPUT_API_KEYS = EnvService.get_user_input_api_keys()
USER_KEY_DB = EnvService.get_api_db()
PRE_MODERATE = EnvService.get_premoderate()
GOOGLE_API_KEY = EnvService.get_google_search_api_key()
GOOGLE_SEARCH_ENGINE_ID = EnvService.get_google_search_engine_id()
OPENAI_API_KEY = EnvService.get_openai_token()
# Set the environment
os.environ["OPENAI_API_KEY"] = OPENAI_API_KEY
openai.api_key = os.environ["OPENAI_API_KEY"]
WOLFRAM_API_KEY = EnvService.get_wolfram_api_key()
vector_stores = {}
class RedoSearchUser:
def __init__(self, ctx, query, search_scope, nodes, response_mode):
self.ctx = ctx
self.query = query
self.search_scope = search_scope
self.nodes = nodes
self.response_mode = response_mode
class CustomTextRequestWrapper(BaseModel):
"""Lightweight wrapper around requests library.
The main purpose of this wrapper is to always return a text output.
"""
headers: Optional[Dict[str, str]] = None
aiosession: Optional[aiohttp.ClientSession] = None
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
arbitrary_types_allowed = True
def __init__(self, **data: Any):
super().__init__(**data)
@property
def requests(self) -> Requests:
return Requests(headers=self.headers, aiosession=self.aiosession)
def get(self, url: str, **kwargs: Any) -> str:
# the "url" field is actuall some input from the LLM, it is a comma separated string of the url and a boolean value and the original query
try:
url, model, original_query = url.split(",")
url = url.strip()
model = model.strip()
original_query = original_query.strip()
except:
url = url
model = "gpt-3.5-turbo"
original_query = "No Original Query Provided"
"""GET the URL and return the text."""
if not url.startswith("http"):
return (
"The website could not be crawled as an invalid URL was input. The input URL was "
+ url
)
text = self.requests.get(url, **kwargs).text
# Load this text into BeautifulSoup, clean it up and only retain text content within <p> and <title> and <h1> type tags, get rid of all javascript and css too.
soup = BeautifulSoup(text, "html.parser")
# Decompose script, style, head, and meta tags
for tag in soup(["script", "style", "head", "meta"]):
tag.decompose()
# Get remaining text from the soup object
text = soup.get_text()
# Clean up white spaces
text = re.sub(r"\s+", " ", text).strip()
# If not using GPT-4 and the text token amount is over 3500, truncate it to 3500 tokens
enc = tiktoken.encoding_for_model(model)
tokens = len(enc.encode(text))
if len(text) < 5:
return "This website could not be scraped. I cannot answer this question."
if (
model in Models.CHATGPT_MODELS
and tokens > Models.get_max_tokens(model) - 1000
) or (
model in Models.GPT4_MODELS and tokens > Models.get_max_tokens(model) - 1000
):
with tempfile.NamedTemporaryFile(mode="w", delete=False) as f:
f.write(text)
f.close()
document = SimpleDirectoryReader(input_files=[f.name]).load_data()
embed_model = OpenAIEmbedding()
service_context = ServiceContext.from_defaults(embed_model=embed_model)
index = GPTVectorStoreIndex.from_documents(
document, service_context=service_context, use_async=True
)
retriever = VectorIndexRetriever(
index=index, similarity_top_k=4, service_context=service_context
)
response_synthesizer = get_response_synthesizer(
response_mode=ResponseMode.COMPACT,
refine_template=CHAT_REFINE_PROMPT,
service_context=service_context,
use_async=True,
)
query_engine = RetrieverQueryEngine(
retriever=retriever, response_synthesizer=response_synthesizer
)
response_text = query_engine.query(original_query)
return response_text
return text
class SearchService(discord.Cog, name="SearchService"):
"""Cog containing translation commands and retrieval of translation services"""
def __init__(
self,
bot,
gpt_model,
usage_service,
deletion_service,
converser_cog,
):
super().__init__()
self.bot = bot
self.usage_service = usage_service
self.model = Search(gpt_model, usage_service)
self.EMBED_CUTOFF = 2000
self.redo_users = {}
self.chat_agents = {}
self.thread_awaiting_responses = []
self.converser_cog = converser_cog
async def paginate_embed(
self, response_text, user: discord.Member, original_link=None
):
"""Given a response text make embed pages and return a list of the pages."""
response_text = [
response_text[i : i + self.EMBED_CUTOFF]
for i in range(0, len(response_text), self.EMBED_CUTOFF)
]
pages = []
first = False
# Send each chunk as a message
for count, chunk in enumerate(response_text, start=1):
if not first:
page = discord.Embed(
title=(
"Search Results" if not original_link else "Follow-up results"
),
description=chunk,
url=original_link,
)
first = True
else:
page = discord.Embed(
title=f"Page {count}",
description=chunk,
url=original_link,
)
if user.avatar:
page.set_footer(
text=f"Requested by {user.name}", icon_url=user.avatar.url
)
else:
page.set_footer(
text=f"Requested by {user.name}", icon_url=user.default_avatar.url
)
pages.append(page)
return pages
@discord.Cog.listener()
async def on_message(self, message):
# Check if the message is from a bot.
if message.author.id == self.bot.user.id:
return
# Check if the message is from a guild.
if not message.guild:
return
# System message
if message.type != discord.MessageType.default:
return
if message.content.strip().startswith("~"):
return
# if we are still awaiting a response from the agent, then we don't want to process the message.
if message.channel.id in self.thread_awaiting_responses:
resp_message = await message.reply(
"Please wait for the agent to respond to a previous message first!"
)
deletion_time = datetime.datetime.now() + datetime.timedelta(seconds=5)
deletion_time = deletion_time.timestamp()
original_deletion_message = Deletion(message, deletion_time)
deletion_message = Deletion(resp_message, deletion_time)
await self.converser_cog.deletion_queue.put(deletion_message)
await self.converser_cog.deletion_queue.put(original_deletion_message)
return
# Pre moderation
if PRE_MODERATE:
if await Moderation.simple_moderate_and_respond(message.content, message):
await message.delete()
return
prompt = message.content.strip()
# If the message channel is in self.chat_agents, then we delegate the message to the agent.
if message.channel.id in self.chat_agents:
if prompt.lower() in ["stop", "end", "quit", "exit"]:
await message.reply("Ending chat session.")
self.chat_agents.pop(message.channel.id)
# close the thread
thread = await self.bot.fetch_channel(message.channel.id)
await thread.edit(name="Closed-GPT")
await thread.edit(archived=True)
return
self.thread_awaiting_responses.append(message.channel.id)
try:
await message.channel.trigger_typing()
except:
pass
agent = self.chat_agents[message.channel.id]
used_tools = []
try:
# Start listening to STDOUT before this call. We wanna track all the output for this specific call below
self.usage_service.update_usage_memory(
message.guild.name, "internet_chat_message", 1
)
response, stdout_output = await capture_stdout(
self.bot.loop.run_in_executor, None, agent.run, prompt
)
response = str(response)
try:
print(stdout_output)
except:
traceback.print_exc()
stdout_output = ""
if "Wolfram-Tool" in stdout_output:
used_tools.append("Wolfram Alpha")
if "Search-Tool" in stdout_output:
used_tools.append("Google Search")
if "Web-Crawling-Tool" in stdout_output:
used_tools.append("Web Crawler")
except Exception as e:
response = f"Error: {e}"
traceback.print_exc()
await message.reply(
embed=EmbedStatics.get_internet_chat_failure_embed(response)
)
safe_remove_list(self.thread_awaiting_responses, message.channel.id)
return
if len(response) > 2000:
embed_pages = EmbedStatics.paginate_chat_embed(response)
for x, page in enumerate(embed_pages):
if x == 0:
previous_message = await message.reply(embed=page)
else:
                    previous_message = await previous_message.reply(embed=page)
else:
response = response.replace("\\n", "\n")
# Build a response embed
response_embed = discord.Embed(
title="",
description=response,
color=0x808080,
)
if len(used_tools) > 0:
response_embed.set_footer(
text="Used tools: " + ", ".join(used_tools)
)
await message.reply(embed=response_embed)
safe_remove_list(self.thread_awaiting_responses, message.channel.id)
async def search_chat_command(
self,
ctx: discord.ApplicationContext,
model,
search_scope=2,
temperature=0,
top_p=1,
):
await ctx.defer()
embed_title = f"{ctx.user.name}'s internet-connected conversation with GPT"
message_embed = discord.Embed(
title=embed_title,
description=f"The agent will visit and browse **{search_scope}** link(s) every time it needs to access the internet.\nCrawling is enabled, send the bot a link for it to access it!\nModel: {model}\n\nType `end` to stop the conversation",
color=0xBA6093,
)
message_embed.set_thumbnail(url="https://i.imgur.com/sioynYZ.png")
message_embed.set_footer(
text="Internet Chat", icon_url="https://i.imgur.com/sioynYZ.png"
)
message_thread = await ctx.send(embed=message_embed)
thread = await message_thread.create_thread(
name=ctx.user.name + "'s internet-connected conversation with GPT",
auto_archive_duration=60,
)
await safe_ctx_respond(ctx=ctx, content="Conversation started.")
# Make a new agent for this user to chat.
search = GoogleSearchAPIWrapper(
google_api_key=GOOGLE_API_KEY,
google_cse_id=GOOGLE_SEARCH_ENGINE_ID,
k=search_scope,
)
requests = CustomTextRequestWrapper()
tools = [
Tool(
name="Search-Tool",
func=search.run,
description="useful when you need to answer questions about current events or retrieve information about a topic that may require the internet. The input to this tool is a search query to ask google. Search queries should be less than 8 words. For example, an input could be 'What is the weather like in New York?' and the tool input would be 'weather new york'.",
),
# The requests tool
Tool(
name="Web-Crawling-Tool",
func=requests.get,
description=f"Useful for when the user provides you with a website link, use this tool to crawl the website and retrieve information from it. The input to this tool is a comma separated list of three values, the first value is the link to crawl for, and the second value is {model} and is the GPT model used, and the third value is the original question that the user asked. For example, an input could be 'https://google.com', gpt-4-32k, 'What is this webpage?'. This tool should only be used if a direct link is provided and not in conjunction with other tools. The link should always start with http or https.",
),
]
# Try to add wolfram tool
try:
wolfram = WolframAlphaAPIWrapper(wolfram_alpha_appid=WOLFRAM_API_KEY)
tools.append(
Tool(
name="Wolfram-Tool",
func=wolfram.run,
description="useful when you need to answer questions about math, solve equations, do proofs, mathematical science questions, science questions, and when asked to do numerical based reasoning.",
)
)
print("Wolfram tool added to internet-connected conversation agent.")
except Exception:
traceback.print_exc()
print("Wolfram tool not added to internet-connected conversation agent.")
llm = ChatOpenAI(
model=model,
temperature=temperature,
top_p=top_p,
openai_api_key=OPENAI_API_KEY,
)
max_token_limit = 29000 if "gpt-4" in model else 7500
memory = ConversationSummaryBufferMemory(
memory_key="memory",
return_messages=True,
llm=llm,
max_token_limit=100000 if "preview" in model else max_token_limit,
)
agent_kwargs = {
"extra_prompt_messages": [MessagesPlaceholder(variable_name="memory")],
"system_message": SystemMessage(
content="You are a superpowered version of GPT-4 that is able to access the internet. You can use google search to browse the web, you can crawl the web to see the content of specific websites, and in some cases you can also use Wolfram Alpha to perform mathematical operations. Use all of these tools to your advantage. You can use tools multiple times, for example if asked a complex question, search multiple times for different pieces of info until you achieve your goal."
),
}
agent_chain = initialize_agent(
tools=tools,
llm=llm,
agent=AgentType.OPENAI_FUNCTIONS,
verbose=True,
agent_kwargs=agent_kwargs,
memory=memory,
handle_parsing_errors="Check your output and make sure it conforms!",
max_iterations=5,
)
self.chat_agents[thread.id] = agent_chain
async def search_command(
self,
ctx: discord.ApplicationContext,
query,
search_scope,
nodes,
deep,
response_mode,
model,
multistep=False,
redo=None,
from_followup=None,
followup_user=None,
):
"""Command handler for the search command"""
await ctx.defer() if not redo else None
# Check the opener for bad content.
if PRE_MODERATE:
if await Moderation.simple_moderate_and_respond(query, ctx):
return
user_api_key = None
if USER_INPUT_API_KEYS:
user_api_key = await TextService.get_user_api_key(
ctx.user.id, ctx, USER_KEY_DB
)
if not user_api_key:
return
if (
not EnvService.get_google_search_api_key()
or not EnvService.get_google_search_engine_id()
):
await ctx.respond(
embed=EmbedStatics.get_search_failure_embed(
str("The search service is not enabled on this server.")
),
)
return
try:
response, refined_text = await self.model.search(
ctx,
query,
user_api_key,
search_scope,
nodes,
deep,
response_mode,
model,
multistep,
)
except ValueError as e:
traceback.print_exc()
await ctx.respond(
embed=EmbedStatics.get_search_failure_embed(str(e)),
ephemeral=True,
)
return
except Exception as e:
await ctx.respond(
embed=EmbedStatics.get_search_failure_embed(str(e)), ephemeral=True
)
traceback.print_exc()
return
url_extract_pattern = "https?:\\/\\/(?:www\\.)?[-a-zA-Z0-9@:%._\\+~#=]{1,256}\\.[a-zA-Z0-9()]{1,6}\\b(?:[-a-zA-Z0-9()@:%_\\+.~#?&\\/=]*)"
urls = re.findall(
url_extract_pattern,
str(response.get_formatted_sources(length=200)),
flags=re.IGNORECASE,
)
urls = "\n".join(f"<{url}>" for url in urls)
# Deduplicate the urls
urls = "\n".join(dict.fromkeys(urls.split("\n")))
if from_followup:
original_link, followup_question = (
from_followup.original_link,
from_followup.followup_question,
)
query_response_message = f"**Question:**\n\n`{followup_question}`\n\n**Google Search Query**\n\n`{refined_text.strip()}`\n\n**Final Answer:**\n\n{response.response.strip()}\n\n**Sources:**\n{urls}"
else:
query_response_message = f"**Question:**\n\n`{query.strip()}`\n\n**Google Search Query**\n\n`{refined_text.strip()}`\n\n**Final Answer:**\n\n{response.response.strip()}\n\n**Sources:**\n{urls}"
query_response_message = query_response_message.replace(
"<|endofstatement|>", ""
)
query_response_message = query_response_message.replace(
"Answer to original:\n", ""
)
query_response_message = query_response_message.replace(
"Answer to follow-up:\n", ""
)
# If the response is too long, lets paginate using the discord pagination
# helper
embed_pages = await self.paginate_embed(
query_response_message,
ctx.user if not followup_user else followup_user,
original_link if from_followup else None,
)
paginator = pages.Paginator(
pages=embed_pages,
timeout=None,
author_check=False,
custom_view=SearchView(ctx, self, query_response_message),
)
self.redo_users[ctx.user.id] = RedoSearchUser(
ctx, query, search_scope, nodes, response_mode
)
await paginator.respond(ctx.interaction)
class SearchView(discord.ui.View):
def __init__(
self,
ctx,
search_cog,
response_text,
):
super().__init__(timeout=None) # No timeout
self.search_cog = search_cog
self.ctx = ctx
self.response_text = response_text
self.add_item(RedoButton(self.ctx, self.search_cog))
self.add_item(FollowupButton(self.ctx, self.search_cog, self.response_text))
# A view for a follow-up button
class FollowupButton(discord.ui.Button["SearchView"]):
def __init__(self, ctx, search_cog, response_text):
super().__init__(label="Follow Up", style=discord.ButtonStyle.green)
self.search_cog = search_cog
self.ctx = ctx
self.response_text = response_text
async def callback(self, interaction: discord.Interaction):
"""Send the followup modal"""
await interaction.response.send_modal(
modal=FollowupModal(self.ctx, self.search_cog, self.response_text)
)
# A view for a redo button
class RedoButton(discord.ui.Button["SearchView"]):
def __init__(self, ctx, search_cog):
super().__init__(
style=discord.ButtonStyle.danger,
label="Redo",
custom_id="redo_search_button",
)
self.ctx = ctx
self.search_cog = search_cog
async def callback(self, interaction: discord.Interaction):
"""Redo the search"""
await interaction.response.send_message(
embed=EmbedStatics.get_search_redo_progress_embed(),
ephemeral=True,
delete_after=15,
)
await self.search_cog.search_command(
self.search_cog.redo_users[self.ctx.user.id].ctx,
self.search_cog.redo_users[self.ctx.user.id].query,
self.search_cog.redo_users[self.ctx.user.id].search_scope,
self.search_cog.redo_users[self.ctx.user.id].nodes,
deep=False,
redo=True,
response_mode=self.search_cog.redo_users[self.ctx.user.id].response_mode,
)
class FollowupData:
def __init__(self, original_link, followup_question):
self.original_link = original_link
self.followup_question = followup_question
# The modal for following up
class FollowupModal(discord.ui.Modal):
def __init__(self, ctx, search_cog, response_text) -> None:
super().__init__(title="Search Follow-up")
# Get the argument named "user_key_db" and save it as USER_KEY_DB
self.search_cog = search_cog
self.ctx = ctx
self.response_text = response_text
self.add_item(
discord.ui.InputText(
label="What other questions do you have?",
placeholder="",
)
)
async def callback(self, interaction: discord.Interaction):
await interaction.response.defer()
query = self.search_cog.redo_users[self.ctx.user.id].query
# In the response text, get only the text between "**Final Answer:**" and "**Sources:**"
self.response_text = self.response_text.split("**Final Answer:**")[1].split(
"**Sources:**"
)[0]
# Build the context
context_text = (
"Original question: "
+ query
+ "\n"
+ "Answer to original: "
+ self.response_text
+ "\n"
+ "Follow-up question: "
+ self.children[0].value
)
# Get the link of the message that the user interacted on
message_link = f"https://discord.com/channels/{interaction.guild_id}/{interaction.channel_id}/{interaction.message.id}"
await self.search_cog.search_command(
self.search_cog.redo_users[self.ctx.user.id].ctx,
context_text,
self.search_cog.redo_users[self.ctx.user.id].search_scope,
self.search_cog.redo_users[self.ctx.user.id].nodes,
deep=False,
redo=True,
from_followup=FollowupData(message_link, self.children[0].value),
response_mode=self.search_cog.redo_users[self.ctx.user.id].response_mode,
followup_user=interaction.user,
model="gpt-4-32k",
)
| [
"llama_index.SimpleDirectoryReader",
"llama_index.response_synthesizers.get_response_synthesizer",
"llama_index.ServiceContext.from_defaults",
"llama_index.OpenAIEmbedding",
"llama_index.retrievers.VectorIndexRetriever",
"llama_index.GPTVectorStoreIndex.from_documents",
"llama_index.query_engine.RetrieverQueryEngine"
] | [((3894, 3925), 'services.environment_service.EnvService.get_allowed_guilds', 'EnvService.get_allowed_guilds', ([], {}), '()\n', (3923, 3925), False, 'from services.environment_service import EnvService\n'), ((3948, 3984), 'services.environment_service.EnvService.get_user_input_api_keys', 'EnvService.get_user_input_api_keys', ([], {}), '()\n', (3982, 3984), False, 'from services.environment_service import EnvService\n'), ((3999, 4022), 'services.environment_service.EnvService.get_api_db', 'EnvService.get_api_db', ([], {}), '()\n', (4020, 4022), False, 'from services.environment_service import EnvService\n'), ((4038, 4066), 'services.environment_service.EnvService.get_premoderate', 'EnvService.get_premoderate', ([], {}), '()\n', (4064, 4066), False, 'from services.environment_service import EnvService\n'), ((4084, 4122), 'services.environment_service.EnvService.get_google_search_api_key', 'EnvService.get_google_search_api_key', ([], {}), '()\n', (4120, 4122), False, 'from services.environment_service import EnvService\n'), ((4149, 4189), 'services.environment_service.EnvService.get_google_search_engine_id', 'EnvService.get_google_search_engine_id', ([], {}), '()\n', (4187, 4189), False, 'from services.environment_service import EnvService\n'), ((4207, 4236), 'services.environment_service.EnvService.get_openai_token', 'EnvService.get_openai_token', ([], {}), '()\n', (4234, 4236), False, 'from services.environment_service import EnvService\n'), ((4370, 4402), 'services.environment_service.EnvService.get_wolfram_api_key', 'EnvService.get_wolfram_api_key', ([], {}), '()\n', (4400, 4402), False, 'from services.environment_service import EnvService\n'), ((2028, 2087), 're.sub', 're.sub', (['"""```(?!json)(.*?)```"""', '"""\\\\1"""', 'text'], {'flags': 're.DOTALL'}), "('```(?!json)(.*?)```', '\\\\1', text, flags=re.DOTALL)\n", (2034, 2087), False, 'import re\n'), ((10472, 10494), 'discord.Cog.listener', 'discord.Cog.listener', ([], {}), '()\n', (10492, 10494), False, 'import discord\n'), ((3458, 3471), 'io.StringIO', 'io.StringIO', ([], {}), '()\n', (3469, 3471), False, 'import io\n'), ((5238, 5296), 'langchain.requests.Requests', 'Requests', ([], {'headers': 'self.headers', 'aiosession': 'self.aiosession'}), '(headers=self.headers, aiosession=self.aiosession)\n', (5246, 5296), False, 'from langchain.requests import Requests\n'), ((6293, 6327), 'bs4.BeautifulSoup', 'BeautifulSoup', (['text', '"""html.parser"""'], {}), "(text, 'html.parser')\n", (6306, 6327), False, 'from bs4 import BeautifulSoup\n'), ((6749, 6783), 'tiktoken.encoding_for_model', 'tiktoken.encoding_for_model', (['model'], {}), '(model)\n', (6776, 6783), False, 'import tiktoken\n'), ((8790, 8822), 'models.search_model.Search', 'Search', (['gpt_model', 'usage_service'], {}), '(gpt_model, usage_service)\n', (8796, 8822), False, 'from models.search_model import Search\n'), ((15403, 15698), 'discord.Embed', 'discord.Embed', ([], {'title': 'embed_title', 'description': 'f"""The agent will visit and browse **{search_scope}** link(s) every time it needs to access the internet.\nCrawling is enabled, send the bot a link for it to access it!\nModel: {model}\n\nType `end` to stop the conversation"""', 'color': '(12214419)'}), '(title=embed_title, description=\n f"""The agent will visit and browse **{search_scope}** link(s) every time it needs to access the internet.\nCrawling is enabled, send the bot a link for it to access it!\nModel: {model}\n\nType `end` to stop the conversation"""\n , color=12214419)\n', (15416, 15698), False, 'import 
discord\n'), ((16315, 16428), 'langchain.utilities.GoogleSearchAPIWrapper', 'GoogleSearchAPIWrapper', ([], {'google_api_key': 'GOOGLE_API_KEY', 'google_cse_id': 'GOOGLE_SEARCH_ENGINE_ID', 'k': 'search_scope'}), '(google_api_key=GOOGLE_API_KEY, google_cse_id=\n GOOGLE_SEARCH_ENGINE_ID, k=search_scope)\n', (16337, 16428), False, 'from langchain.utilities import GoogleSearchAPIWrapper\n'), ((18550, 18646), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'model': 'model', 'temperature': 'temperature', 'top_p': 'top_p', 'openai_api_key': 'OPENAI_API_KEY'}), '(model=model, temperature=temperature, top_p=top_p,\n openai_api_key=OPENAI_API_KEY)\n', (18560, 18646), False, 'from langchain.chat_models import ChatOpenAI\n'), ((18783, 18937), 'langchain.memory.ConversationSummaryBufferMemory', 'ConversationSummaryBufferMemory', ([], {'memory_key': '"""memory"""', 'return_messages': '(True)', 'llm': 'llm', 'max_token_limit': "(100000 if 'preview' in model else max_token_limit)"}), "(memory_key='memory', return_messages=True,\n llm=llm, max_token_limit=100000 if 'preview' in model else max_token_limit)\n", (18814, 18937), False, 'from langchain.memory import ConversationSummaryBufferMemory\n'), ((19689, 19917), 'langchain.agents.initialize_agent', 'initialize_agent', ([], {'tools': 'tools', 'llm': 'llm', 'agent': 'AgentType.OPENAI_FUNCTIONS', 'verbose': '(True)', 'agent_kwargs': 'agent_kwargs', 'memory': 'memory', 'handle_parsing_errors': '"""Check your output and make sure it conforms!"""', 'max_iterations': '(5)'}), "(tools=tools, llm=llm, agent=AgentType.OPENAI_FUNCTIONS,\n verbose=True, agent_kwargs=agent_kwargs, memory=memory,\n handle_parsing_errors='Check your output and make sure it conforms!',\n max_iterations=5)\n", (19705, 19917), False, 'from langchain.agents import Tool, initialize_agent, AgentType\n'), ((2275, 2296), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (2294, 2296), False, 'import traceback\n'), ((10425, 10443), 'discord.ext.pages.append', 'pages.append', (['page'], {}), '(page)\n', (10437, 10443), False, 'from discord.ext import pages\n'), ((11418, 11450), 'services.deletion_service.Deletion', 'Deletion', (['message', 'deletion_time'], {}), '(message, deletion_time)\n', (11426, 11450), False, 'from services.deletion_service import Deletion\n'), ((11482, 11519), 'services.deletion_service.Deletion', 'Deletion', (['resp_message', 'deletion_time'], {}), '(resp_message, deletion_time)\n', (11490, 11519), False, 'from services.deletion_service import Deletion\n'), ((15023, 15091), 'utils.safe_ctx_respond.safe_remove_list', 'safe_remove_list', (['self.thread_awaiting_responses', 'message.channel.id'], {}), '(self.thread_awaiting_responses, message.channel.id)\n', (15039, 15091), False, 'from utils.safe_ctx_respond import safe_ctx_respond, safe_remove_list\n'), ((16188, 16246), 'utils.safe_ctx_respond.safe_ctx_respond', 'safe_ctx_respond', ([], {'ctx': 'ctx', 'content': '"""Conversation started."""'}), "(ctx=ctx, content='Conversation started.')\n", (16204, 16246), False, 'from utils.safe_ctx_respond import safe_ctx_respond, safe_remove_list\n'), ((16549, 16965), 'langchain.agents.Tool', 'Tool', ([], {'name': '"""Search-Tool"""', 'func': 'search.run', 'description': '"""useful when you need to answer questions about current events or retrieve information about a topic that may require the internet. The input to this tool is a search query to ask google. Search queries should be less than 8 words. 
For example, an input could be \'What is the weather like in New York?\' and the tool input would be \'weather new york\'."""'}), '(name=\'Search-Tool\', func=search.run, description=\n "useful when you need to answer questions about current events or retrieve information about a topic that may require the internet. The input to this tool is a search query to ask google. Search queries should be less than 8 words. For example, an input could be \'What is the weather like in New York?\' and the tool input would be \'weather new york\'."\n )\n', (16553, 16965), False, 'from langchain.agents import Tool, initialize_agent, AgentType\n'), ((17064, 17738), 'langchain.agents.Tool', 'Tool', ([], {'name': '"""Web-Crawling-Tool"""', 'func': 'requests.get', 'description': 'f"""Useful for when the user provides you with a website link, use this tool to crawl the website and retrieve information from it. The input to this tool is a comma separated list of three values, the first value is the link to crawl for, and the second value is {model} and is the GPT model used, and the third value is the original question that the user asked. For example, an input could be \'https://google.com\', gpt-4-32k, \'What is this webpage?\'. This tool should only be used if a direct link is provided and not in conjunction with other tools. The link should always start with http or https."""'}), '(name=\'Web-Crawling-Tool\', func=requests.get, description=\n f"Useful for when the user provides you with a website link, use this tool to crawl the website and retrieve information from it. The input to this tool is a comma separated list of three values, the first value is the link to crawl for, and the second value is {model} and is the GPT model used, and the third value is the original question that the user asked. For example, an input could be \'https://google.com\', gpt-4-32k, \'What is this webpage?\'. This tool should only be used if a direct link is provided and not in conjunction with other tools. The link should always start with http or https."\n )\n', (17068, 17738), False, 'from langchain.agents import Tool, initialize_agent, AgentType\n'), ((17873, 17932), 'langchain.utilities.WolframAlphaAPIWrapper', 'WolframAlphaAPIWrapper', ([], {'wolfram_alpha_appid': 'WOLFRAM_API_KEY'}), '(wolfram_alpha_appid=WOLFRAM_API_KEY)\n', (17895, 17932), False, 'from langchain.utilities import WolframAlphaAPIWrapper\n'), ((19133, 19634), 'langchain.schema.SystemMessage', 'SystemMessage', ([], {'content': '"""You are a superpowered version of GPT-4 that is able to access the internet. You can use google search to browse the web, you can crawl the web to see the content of specific websites, and in some cases you can also use Wolfram Alpha to perform mathematical operations. Use all of these tools to your advantage. You can use tools multiple times, for example if asked a complex question, search multiple times for different pieces of info until you achieve your goal."""'}), "(content=\n 'You are a superpowered version of GPT-4 that is able to access the internet. You can use google search to browse the web, you can crawl the web to see the content of specific websites, and in some cases you can also use Wolfram Alpha to perform mathematical operations. Use all of these tools to your advantage. 
You can use tools multiple times, for example if asked a complex question, search multiple times for different pieces of info until you achieve your goal.'\n )\n", (19146, 19634), False, 'from langchain.schema import SystemMessage\n'), ((26726, 26805), 'discord.ui.InputText', 'discord.ui.InputText', ([], {'label': '"""What other questions do you have?"""', 'placeholder': '""""""'}), "(label='What other questions do you have?', placeholder='')\n", (26746, 26805), False, 'import discord\n'), ((2923, 2982), 're.sub', 're.sub', (['"""({)"""', '"""```json \\\\1"""', 'text_without_triple_backticks'], {}), "('({)', '```json \\\\1', text_without_triple_backticks)\n", (2929, 2982), False, 'import re\n'), ((3116, 3171), 're.sub', 're.sub', (['"""(})"""', '"""\\\\1 ```"""', 'text_without_triple_backticks'], {}), "('(})', '\\\\1 ```', text_without_triple_backticks)\n", (3122, 3171), False, 'import re\n'), ((6604, 6629), 're.sub', 're.sub', (['"""\\\\s+"""', '""" """', 'text'], {}), "('\\\\s+', ' ', text)\n", (6610, 6629), False, 'import re\n'), ((7185, 7236), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', ([], {'mode': '"""w"""', 'delete': '(False)'}), "(mode='w', delete=False)\n", (7212, 7236), False, 'import tempfile\n'), ((7412, 7429), 'llama_index.OpenAIEmbedding', 'OpenAIEmbedding', ([], {}), '()\n', (7427, 7429), False, 'from llama_index import GPTVectorStoreIndex, Document, SimpleDirectoryReader, ServiceContext, OpenAIEmbedding\n'), ((7464, 7517), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'embed_model': 'embed_model'}), '(embed_model=embed_model)\n', (7492, 7517), False, 'from llama_index import GPTVectorStoreIndex, Document, SimpleDirectoryReader, ServiceContext, OpenAIEmbedding\n'), ((7542, 7640), 'llama_index.GPTVectorStoreIndex.from_documents', 'GPTVectorStoreIndex.from_documents', (['document'], {'service_context': 'service_context', 'use_async': '(True)'}), '(document, service_context=\n service_context, use_async=True)\n', (7576, 7640), False, 'from llama_index import GPTVectorStoreIndex, Document, SimpleDirectoryReader, ServiceContext, OpenAIEmbedding\n'), ((7702, 7793), 'llama_index.retrievers.VectorIndexRetriever', 'VectorIndexRetriever', ([], {'index': 'index', 'similarity_top_k': '(4)', 'service_context': 'service_context'}), '(index=index, similarity_top_k=4, service_context=\n service_context)\n', (7722, 7793), False, 'from llama_index.retrievers import VectorIndexRetriever\n'), ((7866, 8019), 'llama_index.response_synthesizers.get_response_synthesizer', 'get_response_synthesizer', ([], {'response_mode': 'ResponseMode.COMPACT', 'refine_template': 'CHAT_REFINE_PROMPT', 'service_context': 'service_context', 'use_async': '(True)'}), '(response_mode=ResponseMode.COMPACT,\n refine_template=CHAT_REFINE_PROMPT, service_context=service_context,\n use_async=True)\n', (7890, 8019), False, 'from llama_index.response_synthesizers import get_response_synthesizer, ResponseMode\n'), ((8142, 8231), 'llama_index.query_engine.RetrieverQueryEngine', 'RetrieverQueryEngine', ([], {'retriever': 'retriever', 'response_synthesizer': 'response_synthesizer'}), '(retriever=retriever, response_synthesizer=\n response_synthesizer)\n', (8162, 8231), False, 'from llama_index.query_engine import RetrieverQueryEngine\n'), ((9626, 9751), 'discord.Embed', 'discord.Embed', ([], {'title': "('Search Results' if not original_link else 'Follow-up results')", 'description': 'chunk', 'url': 'original_link'}), "(title='Search Results' if not original_link else\n 
'Follow-up results', description=chunk, url=original_link)\n", (9639, 9751), False, 'import discord\n'), ((9945, 10019), 'discord.Embed', 'discord.Embed', ([], {'title': 'f"""Page {count}"""', 'description': 'chunk', 'url': 'original_link'}), "(title=f'Page {count}', description=chunk, url=original_link)\n", (9958, 10019), False, 'import discord\n'), ((11267, 11290), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (11288, 11290), False, 'import datetime\n'), ((11293, 11322), 'datetime.timedelta', 'datetime.timedelta', ([], {'seconds': '(5)'}), '(seconds=5)\n', (11311, 11322), False, 'import datetime\n'), ((11768, 11832), 'services.moderations_service.Moderation.simple_moderate_and_respond', 'Moderation.simple_moderate_and_respond', (['message.content', 'message'], {}), '(message.content, message)\n', (11806, 11832), False, 'from services.moderations_service import Moderation\n'), ((14175, 14217), 'models.embed_statics_model.EmbedStatics.paginate_chat_embed', 'EmbedStatics.paginate_chat_embed', (['response'], {}), '(response)\n', (14207, 14217), False, 'from models.embed_statics_model import EmbedStatics\n'), ((14634, 14694), 'discord.Embed', 'discord.Embed', ([], {'title': '""""""', 'description': 'response', 'color': '(8421504)'}), "(title='', description=response, color=8421504)\n", (14647, 14694), False, 'import discord\n'), ((17975, 18223), 'langchain.agents.Tool', 'Tool', ([], {'name': '"""Wolfram-Tool"""', 'func': 'wolfram.run', 'description': '"""useful when you need to answer questions about math, solve equations, do proofs, mathematical science questions, science questions, and when asked to do numerical based reasoning."""'}), "(name='Wolfram-Tool', func=wolfram.run, description=\n 'useful when you need to answer questions about math, solve equations, do proofs, mathematical science questions, science questions, and when asked to do numerical based reasoning.'\n )\n", (17979, 18223), False, 'from langchain.agents import Tool, initialize_agent, AgentType\n'), ((18427, 18448), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (18446, 18448), False, 'import traceback\n'), ((19057, 19100), 'langchain.prompts.MessagesPlaceholder', 'MessagesPlaceholder', ([], {'variable_name': '"""memory"""'}), "(variable_name='memory')\n", (19076, 19100), False, 'from langchain.prompts import MessagesPlaceholder\n'), ((20553, 20603), 'services.moderations_service.Moderation.simple_moderate_and_respond', 'Moderation.simple_moderate_and_respond', (['query', 'ctx'], {}), '(query, ctx)\n', (20591, 20603), False, 'from services.moderations_service import Moderation\n'), ((20722, 20781), 'services.text_service.TextService.get_user_api_key', 'TextService.get_user_api_key', (['ctx.user.id', 'ctx', 'USER_KEY_DB'], {}), '(ctx.user.id, ctx, USER_KEY_DB)\n', (20750, 20781), False, 'from services.text_service import TextService\n'), ((20898, 20936), 'services.environment_service.EnvService.get_google_search_api_key', 'EnvService.get_google_search_api_key', ([], {}), '()\n', (20934, 20936), False, 'from services.environment_service import EnvService\n'), ((20956, 20996), 'services.environment_service.EnvService.get_google_search_engine_id', 'EnvService.get_google_search_engine_id', ([], {}), '()\n', (20994, 20996), False, 'from services.environment_service import EnvService\n'), ((21593, 21614), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (21612, 21614), False, 'import traceback\n'), ((21952, 21973), 'traceback.print_exc', 'traceback.print_exc', ([], {}), 
'()\n', (21971, 21973), False, 'import traceback\n'), ((13841, 13862), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (13860, 13862), False, 'import traceback\n'), ((14015, 14083), 'utils.safe_ctx_respond.safe_remove_list', 'safe_remove_list', (['self.thread_awaiting_responses', 'message.channel.id'], {}), '(self.thread_awaiting_responses, message.channel.id)\n', (14031, 14083), False, 'from utils.safe_ctx_respond import safe_ctx_respond, safe_remove_list\n'), ((25590, 25635), 'models.embed_statics_model.EmbedStatics.get_search_redo_progress_embed', 'EmbedStatics.get_search_redo_progress_embed', ([], {}), '()\n', (25633, 25635), False, 'from models.embed_statics_model import EmbedStatics\n'), ((2683, 2724), 'json.dumps', 'json.dumps', (['text_without_triple_backticks'], {}), '(text_without_triple_backticks)\n', (2693, 2724), False, 'import json\n'), ((7017, 7045), 'models.openai_model.Models.get_max_tokens', 'Models.get_max_tokens', (['model'], {}), '(model)\n', (7038, 7045), False, 'from models.openai_model import Models\n'), ((7121, 7149), 'models.openai_model.Models.get_max_tokens', 'Models.get_max_tokens', (['model'], {}), '(model)\n', (7142, 7149), False, 'from models.openai_model import Models\n'), ((7326, 7369), 'llama_index.SimpleDirectoryReader', 'SimpleDirectoryReader', ([], {'input_files': '[f.name]'}), '(input_files=[f.name])\n', (7347, 7369), False, 'from llama_index import GPTVectorStoreIndex, Document, SimpleDirectoryReader, ServiceContext, OpenAIEmbedding\n'), ((13363, 13384), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (13382, 13384), False, 'import traceback\n'), ((13926, 13980), 'models.embed_statics_model.EmbedStatics.get_internet_chat_failure_embed', 'EmbedStatics.get_internet_chat_failure_embed', (['response'], {}), '(response)\n', (13970, 13980), False, 'from models.embed_statics_model import EmbedStatics\n')] |
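The ConvoOutputParser patch near the top of the cog above rests on a single regex; here is a standalone illustration of what it strips (the sample strings are made up).
import re

sample = "Sure!```\nParis is the capital of France.\n```Anything else?"
cleaned = re.sub(r"```(?!json)(.*?)```", r"\1", sample, flags=re.DOTALL)
print(cleaned)   # plain ``` fences are dropped, the text between them is kept

json_sample = '```json {"action": "Final Answer", "action_input": "Paris"} ```'
print(re.sub(r"```(?!json)(.*?)```", r"\1", json_sample, flags=re.DOTALL))
# the ```json fence survives untouched because the negative lookahead skips it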
"""LanceDB vector store with cloud storage support."""
import os
from typing import Any, Optional
from dotenv import load_dotenv
from llama_index.schema import NodeRelationship, RelatedNodeInfo, TextNode
from llama_index.vector_stores import LanceDBVectorStore as LanceDBVectorStoreBase
from llama_index.vector_stores.lancedb import _to_lance_filter, _to_llama_similarities
from llama_index.vector_stores.types import VectorStoreQuery, VectorStoreQueryResult
from pandas import DataFrame
load_dotenv()
class LanceDBVectorStore(LanceDBVectorStoreBase):
"""Advanced LanceDB Vector Store supporting cloud storage and prefiltering."""
from lancedb.query import LanceQueryBuilder
from lancedb.table import Table
def __init__(
self,
uri: str,
table_name: str = "vectors",
nprobes: int = 20,
refine_factor: Optional[int] = None,
api_key: Optional[str] = None,
region: Optional[str] = None,
**kwargs: Any,
) -> None:
"""Init params."""
self._setup_connection(uri, api_key, region)
self.uri = uri
self.table_name = table_name
self.nprobes = nprobes
self.refine_factor = refine_factor
self.api_key = api_key
self.region = region
def _setup_connection(self, uri: str, api_key: Optional[str] = None, region: Optional[str] = None):
"""Establishes a robust connection to LanceDB."""
api_key = api_key or os.getenv('LANCEDB_API_KEY')
region = region or os.getenv('LANCEDB_REGION')
import_err_msg = "`lancedb` package not found, please run `pip install lancedb`"
try:
import lancedb
except ImportError:
raise ImportError(import_err_msg)
if api_key and region:
self.connection = lancedb.connect(uri, api_key=api_key, region=region)
else:
self.connection = lancedb.connect(uri)
def query(
self,
query: VectorStoreQuery,
**kwargs: Any,
) -> VectorStoreQueryResult:
"""Enhanced query method to support prefiltering in LanceDB queries."""
table = self.connection.open_table(self.table_name)
lance_query = self._prepare_lance_query(query, table, **kwargs)
results = lance_query.to_df()
return self._construct_query_result(results)
def _prepare_lance_query(self, query: VectorStoreQuery, table: Table, **kwargs) -> LanceQueryBuilder:
"""Prepares the LanceDB query considering prefiltering and additional parameters."""
if query.filters is not None:
if "where" in kwargs:
raise ValueError(
"Cannot specify filter via both query and kwargs. "
"Use kwargs only for lancedb specific items that are "
"not supported via the generic query interface.")
where = _to_lance_filter(query.filters)
else:
where = kwargs.pop("where", None)
prefilter = kwargs.pop("prefilter", False)
        # `table` is already an open handle passed in from `query`, so reuse it directly.
lance_query = (
table.search(query.query_embedding).limit(query.similarity_top_k).where(
where, prefilter=prefilter).nprobes(self.nprobes))
if self.refine_factor is not None:
lance_query.refine_factor(self.refine_factor)
return lance_query
def _construct_query_result(self, results: DataFrame) -> VectorStoreQueryResult:
"""Constructs a VectorStoreQueryResult from a LanceDB query result."""
nodes = []
for _, row in results.iterrows():
node = TextNode(
text=row.get('text', ''), # ensure text is a string
id_=row['id'],
relationships={
NodeRelationship.SOURCE: RelatedNodeInfo(node_id=row['doc_id']),
})
nodes.append(node)
return VectorStoreQueryResult(
nodes=nodes,
similarities=_to_llama_similarities(results),
ids=results["id"].tolist(),
)
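# --- Usage sketch (illustrative, not part of the original module) ---
# Wires the store into llama_index. The URI, table name, data directory and the
# question are placeholder assumptions, and forwarding `prefilter` through
# `vector_store_kwargs` assumes a llama_index version whose VectorIndexRetriever
# supports that argument.
def _example_build_and_query():
    from llama_index import SimpleDirectoryReader, StorageContext, VectorStoreIndex

    vector_store = LanceDBVectorStore(uri="/tmp/lancedb", table_name="vectors")
    storage_context = StorageContext.from_defaults(vector_store=vector_store)
    documents = SimpleDirectoryReader("data").load_data()
    index = VectorStoreIndex.from_documents(documents, storage_context=storage_context)
    query_engine = index.as_query_engine(
        similarity_top_k=3,
        vector_store_kwargs={"prefilter": True},
    )
    return query_engine.query("What does the corpus say about pricing?")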
| [
"llama_index.vector_stores.lancedb._to_llama_similarities",
"llama_index.schema.RelatedNodeInfo",
"llama_index.vector_stores.lancedb._to_lance_filter"
] | [((490, 503), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (501, 503), False, 'from dotenv import load_dotenv\n'), ((1464, 1492), 'os.getenv', 'os.getenv', (['"""LANCEDB_API_KEY"""'], {}), "('LANCEDB_API_KEY')\n", (1473, 1492), False, 'import os\n'), ((1520, 1547), 'os.getenv', 'os.getenv', (['"""LANCEDB_REGION"""'], {}), "('LANCEDB_REGION')\n", (1529, 1547), False, 'import os\n'), ((1814, 1866), 'lancedb.connect', 'lancedb.connect', (['uri'], {'api_key': 'api_key', 'region': 'region'}), '(uri, api_key=api_key, region=region)\n', (1829, 1866), False, 'import lancedb\n'), ((1911, 1931), 'lancedb.connect', 'lancedb.connect', (['uri'], {}), '(uri)\n', (1926, 1931), False, 'import lancedb\n'), ((2898, 2929), 'llama_index.vector_stores.lancedb._to_lance_filter', '_to_lance_filter', (['query.filters'], {}), '(query.filters)\n', (2914, 2929), False, 'from llama_index.vector_stores.lancedb import _to_lance_filter, _to_llama_similarities\n'), ((4021, 4052), 'llama_index.vector_stores.lancedb._to_llama_similarities', '_to_llama_similarities', (['results'], {}), '(results)\n', (4043, 4052), False, 'from llama_index.vector_stores.lancedb import _to_lance_filter, _to_llama_similarities\n'), ((3841, 3879), 'llama_index.schema.RelatedNodeInfo', 'RelatedNodeInfo', ([], {'node_id': "row['doc_id']"}), "(node_id=row['doc_id'])\n", (3856, 3879), False, 'from llama_index.schema import NodeRelationship, RelatedNodeInfo, TextNode\n')] |
from llama_index import (
VectorStoreIndex,
SimpleDirectoryReader,
load_index_from_storage,
StorageContext,
Prompt,
)
from agenta import post, FloatParam, TextParam
import os
def ingest():
if not os.path.exists("./storage"):
documents = SimpleDirectoryReader("data").load_data()
index = VectorStoreIndex.from_documents(documents)
index.storage_context.persist()
else:
storage_context = StorageContext.from_defaults(persist_dir="./storage")
# rebuild storage context
index = load_index_from_storage(storage_context)
return index
default_prompt = (
"We have provided context information below. \n"
"---------------------\n"
"{context_str}"
"\n---------------------\n"
"Given this information, please answer the question: {query_str}\n"
)
@post
def query(question: str, prompt: TextParam = default_prompt) -> str:
index = ingest()
QA_TEMPLATE = Prompt(prompt)
#
query_engine = index.as_query_engine(text_qa_template=QA_TEMPLATE)
response = query_engine.query(question)
return str(response)
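# Local smoke test (illustrative): bypasses the agenta endpoint and exercises the
# same ingest + query path directly. Assumes OPENAI_API_KEY is set and ./data exists.
if __name__ == "__main__":
    engine = ingest().as_query_engine(text_qa_template=Prompt(default_prompt))
    print(engine.query("What is this document about?"))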
| [
"llama_index.VectorStoreIndex.from_documents",
"llama_index.SimpleDirectoryReader",
"llama_index.StorageContext.from_defaults",
"llama_index.Prompt",
"llama_index.load_index_from_storage"
] | [((957, 971), 'llama_index.Prompt', 'Prompt', (['prompt'], {}), '(prompt)\n', (963, 971), False, 'from llama_index import VectorStoreIndex, SimpleDirectoryReader, load_index_from_storage, StorageContext, Prompt\n'), ((222, 249), 'os.path.exists', 'os.path.exists', (['"""./storage"""'], {}), "('./storage')\n", (236, 249), False, 'import os\n'), ((329, 371), 'llama_index.VectorStoreIndex.from_documents', 'VectorStoreIndex.from_documents', (['documents'], {}), '(documents)\n', (360, 371), False, 'from llama_index import VectorStoreIndex, SimpleDirectoryReader, load_index_from_storage, StorageContext, Prompt\n'), ((448, 501), 'llama_index.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {'persist_dir': '"""./storage"""'}), "(persist_dir='./storage')\n", (476, 501), False, 'from llama_index import VectorStoreIndex, SimpleDirectoryReader, load_index_from_storage, StorageContext, Prompt\n'), ((552, 592), 'llama_index.load_index_from_storage', 'load_index_from_storage', (['storage_context'], {}), '(storage_context)\n', (575, 592), False, 'from llama_index import VectorStoreIndex, SimpleDirectoryReader, load_index_from_storage, StorageContext, Prompt\n'), ((271, 300), 'llama_index.SimpleDirectoryReader', 'SimpleDirectoryReader', (['"""data"""'], {}), "('data')\n", (292, 300), False, 'from llama_index import VectorStoreIndex, SimpleDirectoryReader, load_index_from_storage, StorageContext, Prompt\n')] |
from typing import Dict, List, Optional
import logging
from pathlib import Path
from datetime import datetime
import s3fs
from fsspec.asyn import AsyncFileSystem
from llama_index import (
ServiceContext,
VectorStoreIndex,
StorageContext,
load_indices_from_storage,
)
from llama_index.vector_stores.types import VectorStore
from tempfile import TemporaryDirectory
import requests
import nest_asyncio
from datetime import timedelta
from cachetools import cached, TTLCache
from llama_index.readers.file.docs_reader import PDFReader
from llama_index.schema import Document as LlamaIndexDocument
from llama_index.agent import OpenAIAgent
from llama_index.llms import ChatMessage, OpenAI
from llama_index.embeddings.openai import (
OpenAIEmbedding,
OpenAIEmbeddingMode,
OpenAIEmbeddingModelType,
)
from llama_index.llms.base import MessageRole
from llama_index.callbacks.base import BaseCallbackHandler, CallbackManager
from llama_index.tools import QueryEngineTool, ToolMetadata
from llama_index.query_engine import SubQuestionQueryEngine
from llama_index.indices.query.base import BaseQueryEngine
from llama_index.vector_stores.types import (
MetadataFilters,
ExactMatchFilter,
)
from llama_index.node_parser import SentenceSplitter
from app.core.config import settings
from app.schema import (
Message as MessageSchema,
Document as DocumentSchema,
Conversation as ConversationSchema,
DocumentMetadataKeysEnum,
SecDocumentMetadata,
)
from app.models.db import MessageRoleEnum, MessageStatusEnum
from app.chat.constants import (
DB_DOC_ID_KEY,
SYSTEM_MESSAGE,
NODE_PARSER_CHUNK_OVERLAP,
NODE_PARSER_CHUNK_SIZE,
)
from app.chat.tools import get_api_query_engine_tool
from app.chat.utils import build_title_for_document
from app.chat.pg_vector import get_vector_store_singleton
from app.chat.qa_response_synth import get_custom_response_synth
logger = logging.getLogger(__name__)
logger.info("Applying nested asyncio patch")
nest_asyncio.apply()
OPENAI_TOOL_LLM_NAME = "gpt-3.5-turbo-0613"
OPENAI_CHAT_LLM_NAME = "gpt-3.5-turbo-0613"
def get_s3_fs() -> AsyncFileSystem:
s3 = s3fs.S3FileSystem(
key=settings.AWS_KEY,
secret=settings.AWS_SECRET,
endpoint_url=settings.S3_ENDPOINT_URL,
)
if not (settings.RENDER or s3.exists(settings.S3_BUCKET_NAME)):
s3.mkdir(settings.S3_BUCKET_NAME)
return s3
def fetch_and_read_document(
document: DocumentSchema,
) -> List[LlamaIndexDocument]:
    # Super hacky approach to get this feature-complete on time.
# TODO: Come up with better abstractions for this and the other methods in this module.
with TemporaryDirectory() as temp_dir:
temp_file_path = Path(temp_dir) / f"{str(document.id)}.pdf"
with open(temp_file_path, "wb") as temp_file:
with requests.get(document.url, stream=True) as r:
r.raise_for_status()
for chunk in r.iter_content(chunk_size=8192):
temp_file.write(chunk)
temp_file.seek(0)
reader = PDFReader()
return reader.load_data(
temp_file_path, extra_info={DB_DOC_ID_KEY: str(document.id)}
)
def build_description_for_document(document: DocumentSchema) -> str:
if DocumentMetadataKeysEnum.SEC_DOCUMENT in document.metadata_map:
sec_metadata = SecDocumentMetadata.parse_obj(
document.metadata_map[DocumentMetadataKeysEnum.SEC_DOCUMENT]
)
time_period = (
f"{sec_metadata.year} Q{sec_metadata.quarter}"
if sec_metadata.quarter
else str(sec_metadata.year)
)
return f"A SEC {sec_metadata.doc_type.value} filing describing the financials of {sec_metadata.company_name} ({sec_metadata.company_ticker}) for the {time_period} time period."
return "A document containing useful information that the user pre-selected to discuss with the assistant."
def index_to_query_engine(doc_id: str, index: VectorStoreIndex) -> BaseQueryEngine:
filters = MetadataFilters(
filters=[ExactMatchFilter(key=DB_DOC_ID_KEY, value=doc_id)]
)
kwargs = {"similarity_top_k": 3, "filters": filters}
return index.as_query_engine(**kwargs)
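# The cache key below is a constant, so all callers share a single cached
# StorageContext entry that expires five minutes after it is created.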
@cached(
TTLCache(maxsize=10, ttl=timedelta(minutes=5).total_seconds()),
key=lambda *args, **kwargs: "global_storage_context",
)
def get_storage_context(
persist_dir: str, vector_store: VectorStore, fs: Optional[AsyncFileSystem] = None
) -> StorageContext:
logger.info("Creating new storage context.")
return StorageContext.from_defaults(
persist_dir=persist_dir, vector_store=vector_store, fs=fs
)
async def build_doc_id_to_index_map(
service_context: ServiceContext,
documents: List[DocumentSchema],
fs: Optional[AsyncFileSystem] = None,
) -> Dict[str, VectorStoreIndex]:
persist_dir = f"{settings.S3_BUCKET_NAME}"
vector_store = await get_vector_store_singleton()
try:
try:
storage_context = get_storage_context(persist_dir, vector_store, fs=fs)
except FileNotFoundError:
logger.info(
"Could not find storage context in S3. Creating new storage context."
)
storage_context = StorageContext.from_defaults(
vector_store=vector_store, fs=fs
)
storage_context.persist(persist_dir=persist_dir, fs=fs)
index_ids = [str(doc.id) for doc in documents]
indices = load_indices_from_storage(
storage_context,
index_ids=index_ids,
service_context=service_context,
)
doc_id_to_index = dict(zip(index_ids, indices))
logger.debug("Loaded indices from storage.")
except ValueError:
logger.error(
"Failed to load indices from storage. Creating new indices. "
"If you're running the seed_db script, this is normal and expected."
)
storage_context = StorageContext.from_defaults(
persist_dir=persist_dir, vector_store=vector_store, fs=fs
)
doc_id_to_index = {}
for doc in documents:
llama_index_docs = fetch_and_read_document(doc)
storage_context.docstore.add_documents(llama_index_docs)
index = VectorStoreIndex.from_documents(
llama_index_docs,
storage_context=storage_context,
service_context=service_context,
)
index.set_index_id(str(doc.id))
index.storage_context.persist(persist_dir=persist_dir, fs=fs)
doc_id_to_index[str(doc.id)] = index
return doc_id_to_index
def get_chat_history(
chat_messages: List[MessageSchema],
) -> List[ChatMessage]:
"""
Given a list of chat messages, return a list of ChatMessage instances.
Failed chat messages are filtered out and then the remaining ones are
sorted by created_at.
"""
# pre-process chat messages
chat_messages = [
m
for m in chat_messages
if m.content.strip() and m.status == MessageStatusEnum.SUCCESS
]
# TODO: could be a source of high CPU utilization
chat_messages = sorted(chat_messages, key=lambda m: m.created_at)
chat_history = []
for message in chat_messages:
role = (
MessageRole.ASSISTANT
if message.role == MessageRoleEnum.assistant
else MessageRole.USER
)
chat_history.append(ChatMessage(content=message.content, role=role))
return chat_history
def get_tool_service_context(
callback_handlers: List[BaseCallbackHandler],
) -> ServiceContext:
llm = OpenAI(
temperature=0,
model=OPENAI_TOOL_LLM_NAME,
streaming=False,
api_key=settings.OPENAI_API_KEY,
)
callback_manager = CallbackManager(callback_handlers)
embedding_model = OpenAIEmbedding(
mode=OpenAIEmbeddingMode.SIMILARITY_MODE,
model_type=OpenAIEmbeddingModelType.TEXT_EMBED_ADA_002,
api_key=settings.OPENAI_API_KEY,
)
# Use a smaller chunk size to retrieve more granular results
node_parser = SentenceSplitter.from_defaults(
chunk_size=NODE_PARSER_CHUNK_SIZE,
chunk_overlap=NODE_PARSER_CHUNK_OVERLAP,
callback_manager=callback_manager,
)
service_context = ServiceContext.from_defaults(
callback_manager=callback_manager,
llm=llm,
embed_model=embedding_model,
node_parser=node_parser,
)
return service_context
async def get_chat_engine(
callback_handler: BaseCallbackHandler,
conversation: ConversationSchema,
) -> OpenAIAgent:
service_context = get_tool_service_context([callback_handler])
s3_fs = get_s3_fs()
doc_id_to_index = await build_doc_id_to_index_map(
service_context, conversation.documents, fs=s3_fs
)
id_to_doc: Dict[str, DocumentSchema] = {
str(doc.id): doc for doc in conversation.documents
}
vector_query_engine_tools = [
QueryEngineTool(
query_engine=index_to_query_engine(doc_id, index),
metadata=ToolMetadata(
name=doc_id,
description=build_description_for_document(id_to_doc[doc_id]),
),
)
for doc_id, index in doc_id_to_index.items()
]
response_synth = get_custom_response_synth(service_context, conversation.documents)
qualitative_question_engine = SubQuestionQueryEngine.from_defaults(
query_engine_tools=vector_query_engine_tools,
service_context=service_context,
response_synthesizer=response_synth,
verbose=settings.VERBOSE,
use_async=True,
)
api_query_engine_tools = [
get_api_query_engine_tool(doc, service_context)
for doc in conversation.documents
if DocumentMetadataKeysEnum.SEC_DOCUMENT in doc.metadata_map
]
quantitative_question_engine = SubQuestionQueryEngine.from_defaults(
query_engine_tools=api_query_engine_tools,
service_context=service_context,
response_synthesizer=response_synth,
verbose=settings.VERBOSE,
use_async=True,
)
top_level_sub_tools = [
QueryEngineTool(
query_engine=qualitative_question_engine,
metadata=ToolMetadata(
name="qualitative_question_engine",
description="""
A query engine that can answer qualitative questions about a set of SEC financial documents that the user pre-selected for the conversation.
Any questions about company-related headwinds, tailwinds, risks, sentiments, or administrative information should be asked here.
""".strip(),
),
),
QueryEngineTool(
query_engine=quantitative_question_engine,
metadata=ToolMetadata(
name="quantitative_question_engine",
description="""
A query engine that can answer quantitative questions about a set of SEC financial documents that the user pre-selected for the conversation.
Any questions about company-related financials or other metrics should be asked here.
""".strip(),
),
),
]
chat_llm = OpenAI(
temperature=0,
model=OPENAI_CHAT_LLM_NAME,
streaming=True,
api_key=settings.OPENAI_API_KEY,
)
chat_messages: List[MessageSchema] = conversation.messages
chat_history = get_chat_history(chat_messages)
logger.debug("Chat history: %s", chat_history)
if conversation.documents:
doc_titles = "\n".join(
"- " + build_title_for_document(doc) for doc in conversation.documents
)
else:
doc_titles = "No documents selected."
curr_date = datetime.utcnow().strftime("%Y-%m-%d")
chat_engine = OpenAIAgent.from_tools(
tools=top_level_sub_tools,
llm=chat_llm,
chat_history=chat_history,
verbose=settings.VERBOSE,
system_prompt=SYSTEM_MESSAGE.format(doc_titles=doc_titles, curr_date=curr_date),
callback_manager=service_context.callback_manager,
max_function_calls=3,
)
return chat_engine
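# Usage sketch (illustrative, not part of the original module). The conversation
# object and callback handler are assumed to be supplied by the surrounding app;
# the question is a placeholder.
async def _demo_chat(callback_handler: BaseCallbackHandler, conversation: ConversationSchema) -> None:
    chat_engine = await get_chat_engine(callback_handler, conversation)
    streaming_response = await chat_engine.astream_chat("What were the main risk factors?")
    async for token in streaming_response.async_response_gen():
        print(token, end="")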
| [
"llama_index.VectorStoreIndex.from_documents",
"llama_index.readers.file.docs_reader.PDFReader",
"llama_index.ServiceContext.from_defaults",
"llama_index.llms.OpenAI",
"llama_index.StorageContext.from_defaults",
"llama_index.llms.ChatMessage",
"llama_index.load_indices_from_storage",
"llama_index.node_parser.SentenceSplitter.from_defaults",
"llama_index.callbacks.base.CallbackManager",
"llama_index.query_engine.SubQuestionQueryEngine.from_defaults",
"llama_index.embeddings.openai.OpenAIEmbedding",
"llama_index.vector_stores.types.ExactMatchFilter"
] | [((1919, 1946), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1936, 1946), False, 'import logging\n'), ((1994, 2014), 'nest_asyncio.apply', 'nest_asyncio.apply', ([], {}), '()\n', (2012, 2014), False, 'import nest_asyncio\n'), ((2151, 2261), 's3fs.S3FileSystem', 's3fs.S3FileSystem', ([], {'key': 'settings.AWS_KEY', 'secret': 'settings.AWS_SECRET', 'endpoint_url': 'settings.S3_ENDPOINT_URL'}), '(key=settings.AWS_KEY, secret=settings.AWS_SECRET,\n endpoint_url=settings.S3_ENDPOINT_URL)\n', (2168, 2261), False, 'import s3fs\n'), ((4593, 4685), 'llama_index.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {'persist_dir': 'persist_dir', 'vector_store': 'vector_store', 'fs': 'fs'}), '(persist_dir=persist_dir, vector_store=\n vector_store, fs=fs)\n', (4621, 4685), False, 'from llama_index import ServiceContext, VectorStoreIndex, StorageContext, load_indices_from_storage\n'), ((7695, 7799), 'llama_index.llms.OpenAI', 'OpenAI', ([], {'temperature': '(0)', 'model': 'OPENAI_TOOL_LLM_NAME', 'streaming': '(False)', 'api_key': 'settings.OPENAI_API_KEY'}), '(temperature=0, model=OPENAI_TOOL_LLM_NAME, streaming=False, api_key=\n settings.OPENAI_API_KEY)\n', (7701, 7799), False, 'from llama_index.llms import ChatMessage, OpenAI\n'), ((7857, 7891), 'llama_index.callbacks.base.CallbackManager', 'CallbackManager', (['callback_handlers'], {}), '(callback_handlers)\n', (7872, 7891), False, 'from llama_index.callbacks.base import BaseCallbackHandler, CallbackManager\n'), ((7914, 8070), 'llama_index.embeddings.openai.OpenAIEmbedding', 'OpenAIEmbedding', ([], {'mode': 'OpenAIEmbeddingMode.SIMILARITY_MODE', 'model_type': 'OpenAIEmbeddingModelType.TEXT_EMBED_ADA_002', 'api_key': 'settings.OPENAI_API_KEY'}), '(mode=OpenAIEmbeddingMode.SIMILARITY_MODE, model_type=\n OpenAIEmbeddingModelType.TEXT_EMBED_ADA_002, api_key=settings.\n OPENAI_API_KEY)\n', (7929, 8070), False, 'from llama_index.embeddings.openai import OpenAIEmbedding, OpenAIEmbeddingMode, OpenAIEmbeddingModelType\n'), ((8175, 8320), 'llama_index.node_parser.SentenceSplitter.from_defaults', 'SentenceSplitter.from_defaults', ([], {'chunk_size': 'NODE_PARSER_CHUNK_SIZE', 'chunk_overlap': 'NODE_PARSER_CHUNK_OVERLAP', 'callback_manager': 'callback_manager'}), '(chunk_size=NODE_PARSER_CHUNK_SIZE,\n chunk_overlap=NODE_PARSER_CHUNK_OVERLAP, callback_manager=callback_manager)\n', (8205, 8320), False, 'from llama_index.node_parser import SentenceSplitter\n'), ((8370, 8500), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'callback_manager': 'callback_manager', 'llm': 'llm', 'embed_model': 'embedding_model', 'node_parser': 'node_parser'}), '(callback_manager=callback_manager, llm=llm,\n embed_model=embedding_model, node_parser=node_parser)\n', (8398, 8500), False, 'from llama_index import ServiceContext, VectorStoreIndex, StorageContext, load_indices_from_storage\n'), ((9383, 9449), 'app.chat.qa_response_synth.get_custom_response_synth', 'get_custom_response_synth', (['service_context', 'conversation.documents'], {}), '(service_context, conversation.documents)\n', (9408, 9449), False, 'from app.chat.qa_response_synth import get_custom_response_synth\n'), ((9485, 9692), 'llama_index.query_engine.SubQuestionQueryEngine.from_defaults', 'SubQuestionQueryEngine.from_defaults', ([], {'query_engine_tools': 'vector_query_engine_tools', 'service_context': 'service_context', 'response_synthesizer': 'response_synth', 'verbose': 'settings.VERBOSE', 'use_async': '(True)'}), 
'(query_engine_tools=\n vector_query_engine_tools, service_context=service_context,\n response_synthesizer=response_synth, verbose=settings.VERBOSE,\n use_async=True)\n', (9521, 9692), False, 'from llama_index.query_engine import SubQuestionQueryEngine\n'), ((9968, 10172), 'llama_index.query_engine.SubQuestionQueryEngine.from_defaults', 'SubQuestionQueryEngine.from_defaults', ([], {'query_engine_tools': 'api_query_engine_tools', 'service_context': 'service_context', 'response_synthesizer': 'response_synth', 'verbose': 'settings.VERBOSE', 'use_async': '(True)'}), '(query_engine_tools=\n api_query_engine_tools, service_context=service_context,\n response_synthesizer=response_synth, verbose=settings.VERBOSE,\n use_async=True)\n', (10004, 10172), False, 'from llama_index.query_engine import SubQuestionQueryEngine\n'), ((11232, 11335), 'llama_index.llms.OpenAI', 'OpenAI', ([], {'temperature': '(0)', 'model': 'OPENAI_CHAT_LLM_NAME', 'streaming': '(True)', 'api_key': 'settings.OPENAI_API_KEY'}), '(temperature=0, model=OPENAI_CHAT_LLM_NAME, streaming=True, api_key=\n settings.OPENAI_API_KEY)\n', (11238, 11335), False, 'from llama_index.llms import ChatMessage, OpenAI\n'), ((2674, 2694), 'tempfile.TemporaryDirectory', 'TemporaryDirectory', ([], {}), '()\n', (2692, 2694), False, 'from tempfile import TemporaryDirectory\n'), ((3391, 3487), 'app.schema.SecDocumentMetadata.parse_obj', 'SecDocumentMetadata.parse_obj', (['document.metadata_map[DocumentMetadataKeysEnum.SEC_DOCUMENT]'], {}), '(document.metadata_map[\n DocumentMetadataKeysEnum.SEC_DOCUMENT])\n', (3420, 3487), False, 'from app.schema import Message as MessageSchema, Document as DocumentSchema, Conversation as ConversationSchema, DocumentMetadataKeysEnum, SecDocumentMetadata\n'), ((4957, 4985), 'app.chat.pg_vector.get_vector_store_singleton', 'get_vector_store_singleton', ([], {}), '()\n', (4983, 4985), False, 'from app.chat.pg_vector import get_vector_store_singleton\n'), ((5515, 5615), 'llama_index.load_indices_from_storage', 'load_indices_from_storage', (['storage_context'], {'index_ids': 'index_ids', 'service_context': 'service_context'}), '(storage_context, index_ids=index_ids,\n service_context=service_context)\n', (5540, 5615), False, 'from llama_index import ServiceContext, VectorStoreIndex, StorageContext, load_indices_from_storage\n'), ((9767, 9814), 'app.chat.tools.get_api_query_engine_tool', 'get_api_query_engine_tool', (['doc', 'service_context'], {}), '(doc, service_context)\n', (9792, 9814), False, 'from app.chat.tools import get_api_query_engine_tool\n'), ((2733, 2747), 'pathlib.Path', 'Path', (['temp_dir'], {}), '(temp_dir)\n', (2737, 2747), False, 'from pathlib import Path\n'), ((3086, 3097), 'llama_index.readers.file.docs_reader.PDFReader', 'PDFReader', ([], {}), '()\n', (3095, 3097), False, 'from llama_index.readers.file.docs_reader import PDFReader\n'), ((6004, 6096), 'llama_index.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {'persist_dir': 'persist_dir', 'vector_store': 'vector_store', 'fs': 'fs'}), '(persist_dir=persist_dir, vector_store=\n vector_store, fs=fs)\n', (6032, 6096), False, 'from llama_index import ServiceContext, VectorStoreIndex, StorageContext, load_indices_from_storage\n'), ((7508, 7555), 'llama_index.llms.ChatMessage', 'ChatMessage', ([], {'content': 'message.content', 'role': 'role'}), '(content=message.content, role=role)\n', (7519, 7555), False, 'from llama_index.llms import ChatMessage, OpenAI\n'), ((11765, 11782), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), 
'()\n', (11780, 11782), False, 'from datetime import datetime\n'), ((11994, 12059), 'app.chat.constants.SYSTEM_MESSAGE.format', 'SYSTEM_MESSAGE.format', ([], {'doc_titles': 'doc_titles', 'curr_date': 'curr_date'}), '(doc_titles=doc_titles, curr_date=curr_date)\n', (12015, 12059), False, 'from app.chat.constants import DB_DOC_ID_KEY, SYSTEM_MESSAGE, NODE_PARSER_CHUNK_OVERLAP, NODE_PARSER_CHUNK_SIZE\n'), ((2847, 2886), 'requests.get', 'requests.get', (['document.url'], {'stream': '(True)'}), '(document.url, stream=True)\n', (2859, 2886), False, 'import requests\n'), ((4105, 4154), 'llama_index.vector_stores.types.ExactMatchFilter', 'ExactMatchFilter', ([], {'key': 'DB_DOC_ID_KEY', 'value': 'doc_id'}), '(key=DB_DOC_ID_KEY, value=doc_id)\n', (4121, 4154), False, 'from llama_index.vector_stores.types import MetadataFilters, ExactMatchFilter\n'), ((5281, 5343), 'llama_index.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {'vector_store': 'vector_store', 'fs': 'fs'}), '(vector_store=vector_store, fs=fs)\n', (5309, 5343), False, 'from llama_index import ServiceContext, VectorStoreIndex, StorageContext, load_indices_from_storage\n'), ((6322, 6442), 'llama_index.VectorStoreIndex.from_documents', 'VectorStoreIndex.from_documents', (['llama_index_docs'], {'storage_context': 'storage_context', 'service_context': 'service_context'}), '(llama_index_docs, storage_context=\n storage_context, service_context=service_context)\n', (6353, 6442), False, 'from llama_index import ServiceContext, VectorStoreIndex, StorageContext, load_indices_from_storage\n'), ((4302, 4322), 'datetime.timedelta', 'timedelta', ([], {'minutes': '(5)'}), '(minutes=5)\n', (4311, 4322), False, 'from datetime import timedelta\n'), ((11618, 11647), 'app.chat.utils.build_title_for_document', 'build_title_for_document', (['doc'], {}), '(doc)\n', (11642, 11647), False, 'from app.chat.utils import build_title_for_document\n')] |
# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: MIT
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
import faiss
import os
from llama_index.vector_stores import FaissVectorStore
from llama_index import VectorStoreIndex, SimpleDirectoryReader, Document
from llama_index import StorageContext, load_index_from_storage
from llama_index.vector_stores.simple import SimpleVectorStore
from llama_index.storage.docstore.simple_docstore import SimpleDocumentStore
from llama_index.storage.index_store.simple_index_store import SimpleIndexStore
class FaissEmbeddingStorage:
def __init__(self, data_dir, dimension=384):
self.d = dimension
self.data_dir = data_dir
self.index = self.initialize_index()
def initialize_index(self):
if os.path.exists("storage-default") and os.listdir("storage-default"):
print("Using the persisted value")
vector_store = FaissVectorStore.from_persist_dir("storage-default")
storage_context = StorageContext.from_defaults(
vector_store=vector_store, persist_dir="storage-default"
)
index = load_index_from_storage(storage_context=storage_context)
return index
else:
print("generating new values")
documents = SimpleDirectoryReader(self.data_dir).load_data()
faiss_index = faiss.IndexFlatL2(self.d)
vector_store = FaissVectorStore(faiss_index=faiss_index)
storage_context = StorageContext.from_defaults(vector_store=vector_store)
index = VectorStoreIndex.from_documents(documents, storage_context=storage_context)
            index.storage_context.persist(persist_dir="storage-default")
return index
def get_query_engine(self):
return self.index.as_query_engine()
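# Usage sketch (illustrative, not part of the original module). Assumes a local
# "data" directory and a globally configured embedding model whose vector size
# matches the 384-dimension default (e.g. a MiniLM-class sentence transformer).
if __name__ == "__main__":
    storage = FaissEmbeddingStorage(data_dir="data")
    engine = storage.get_query_engine()
    print(engine.query("Summarize the documents."))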
| [
"llama_index.VectorStoreIndex.from_documents",
"llama_index.SimpleDirectoryReader",
"llama_index.vector_stores.FaissVectorStore",
"llama_index.vector_stores.FaissVectorStore.from_persist_dir",
"llama_index.StorageContext.from_defaults",
"llama_index.load_index_from_storage"
] | [((1848, 1881), 'os.path.exists', 'os.path.exists', (['"""storage-default"""'], {}), "('storage-default')\n", (1862, 1881), False, 'import faiss, os\n'), ((1886, 1915), 'os.listdir', 'os.listdir', (['"""storage-default"""'], {}), "('storage-default')\n", (1896, 1915), False, 'import faiss, os\n'), ((1991, 2043), 'llama_index.vector_stores.FaissVectorStore.from_persist_dir', 'FaissVectorStore.from_persist_dir', (['"""storage-default"""'], {}), "('storage-default')\n", (2024, 2043), False, 'from llama_index.vector_stores import FaissVectorStore\n'), ((2074, 2165), 'llama_index.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {'vector_store': 'vector_store', 'persist_dir': '"""storage-default"""'}), "(vector_store=vector_store, persist_dir=\n 'storage-default')\n", (2102, 2165), False, 'from llama_index import StorageContext, load_index_from_storage\n'), ((2211, 2267), 'llama_index.load_index_from_storage', 'load_index_from_storage', ([], {'storage_context': 'storage_context'}), '(storage_context=storage_context)\n', (2234, 2267), False, 'from llama_index import StorageContext, load_index_from_storage\n'), ((2449, 2474), 'faiss.IndexFlatL2', 'faiss.IndexFlatL2', (['self.d'], {}), '(self.d)\n', (2466, 2474), False, 'import faiss, os\n'), ((2502, 2543), 'llama_index.vector_stores.FaissVectorStore', 'FaissVectorStore', ([], {'faiss_index': 'faiss_index'}), '(faiss_index=faiss_index)\n', (2518, 2543), False, 'from llama_index.vector_stores import FaissVectorStore\n'), ((2574, 2629), 'llama_index.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {'vector_store': 'vector_store'}), '(vector_store=vector_store)\n', (2602, 2629), False, 'from llama_index import StorageContext, load_index_from_storage\n'), ((2650, 2725), 'llama_index.VectorStoreIndex.from_documents', 'VectorStoreIndex.from_documents', (['documents'], {'storage_context': 'storage_context'}), '(documents, storage_context=storage_context)\n', (2681, 2725), False, 'from llama_index import VectorStoreIndex, SimpleDirectoryReader, Document\n'), ((2374, 2410), 'llama_index.SimpleDirectoryReader', 'SimpleDirectoryReader', (['self.data_dir'], {}), '(self.data_dir)\n', (2395, 2410), False, 'from llama_index import VectorStoreIndex, SimpleDirectoryReader, Document\n')] |
"""Memory for agents."""
import os
from llama_index.core import GPTVectorStoreIndex, GPTListIndex
from llama_index.core import Document, ServiceContext
from llama_index.legacy import LLMPredictor
from llama_index.core import StorageContext, load_index_from_storage
from langchain_community.chat_models import ChatOpenAI
import logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
# https://gpt-index.readthedocs.io/en/latest/guides/index_guide.html
INDEX_TYPES = {
# Good for retrieval, because of top_k and embeddings.
"vector": GPTVectorStoreIndex,
# Good for aggregate summaries, but slow.
"list": GPTListIndex,
}
LLM_PREDICTOR_TYPES = {
"gpt-3.5-turbo": ChatOpenAI,
"gpt-3.5-turbo-16k": ChatOpenAI,
"gpt-4": ChatOpenAI,
}
class Memory:
def __init__(self, memory_folder=None, index_type="vector", llm_predictor="gpt-3.5-turbo"):
assert index_type in INDEX_TYPES, f"Invalid index type: {index_type}"
assert llm_predictor in LLM_PREDICTOR_TYPES, f"Invalid LLM predictor: {llm_predictor}"
self.texts = []
llm_kwargs = {"temperature": 0, "model_name": llm_predictor}
predictor_constructor = LLM_PREDICTOR_TYPES[llm_predictor]
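        # Wrap the selected chat model in an LLMPredictor so llama_index can drive it;
        # temperature 0 keeps answers deterministic for retrieval-style queries.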
llm = LLMPredictor(llm=predictor_constructor(**llm_kwargs))
service_context = ServiceContext.from_defaults(llm_predictor=llm)
if memory_folder and os.path.exists(memory_folder):
logger.info("Loading memory from disk.")
storage_context = StorageContext.from_defaults(persist_dir=memory_folder)
self.index = load_index_from_storage(storage_context)
else:
self.index = INDEX_TYPES[index_type].from_documents([], service_context=service_context)
self.llm_predictor = llm_predictor
def query(self, prompt, similarity_top_k=3):
query_engine = self.index.as_query_engine(similarity_top_k=similarity_top_k)
return query_engine.query(prompt)
def add(self, text):
if text in self.texts:
logger.info("Skipping duplicate text.")
return
self.texts.append(text)
self.index.insert(Document(text=text))
def save(self, path):
self.index.storage_context.persist(path) | [
"llama_index.core.StorageContext.from_defaults",
"llama_index.core.load_index_from_storage",
"llama_index.core.ServiceContext.from_defaults",
"llama_index.core.Document"
] | [((337, 376), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO'}), '(level=logging.INFO)\n', (356, 376), False, 'import logging\n'), ((386, 413), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (403, 413), False, 'import logging\n'), ((1330, 1377), 'llama_index.core.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'llm_predictor': 'llm'}), '(llm_predictor=llm)\n', (1358, 1377), False, 'from llama_index.core import Document, ServiceContext\n'), ((1408, 1437), 'os.path.exists', 'os.path.exists', (['memory_folder'], {}), '(memory_folder)\n', (1422, 1437), False, 'import os\n'), ((1522, 1577), 'llama_index.core.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {'persist_dir': 'memory_folder'}), '(persist_dir=memory_folder)\n', (1550, 1577), False, 'from llama_index.core import StorageContext, load_index_from_storage\n'), ((1603, 1643), 'llama_index.core.load_index_from_storage', 'load_index_from_storage', (['storage_context'], {}), '(storage_context)\n', (1626, 1643), False, 'from llama_index.core import StorageContext, load_index_from_storage\n'), ((2165, 2184), 'llama_index.core.Document', 'Document', ([], {'text': 'text'}), '(text=text)\n', (2173, 2184), False, 'from llama_index.core import Document, ServiceContext\n')] |
import logging
from typing import List, Optional
import requests
from llama_index.callbacks.base import CallbackManager
from llama_index.constants import DEFAULT_SIMILARITY_TOP_K
from llama_index.core.base_retriever import BaseRetriever
from llama_index.indices.managed.zilliz.base import ZillizCloudPipelineIndex
from llama_index.indices.query.schema import QueryBundle
from llama_index.schema import NodeWithScore, TextNode
from llama_index.vector_stores.types import MetadataFilters
logger = logging.getLogger(__name__)
class ZillizCloudPipelineRetriever(BaseRetriever):
"""A retriever built on top of Zilliz Cloud Pipeline's index."""
def __init__(
self,
index: ZillizCloudPipelineIndex,
search_top_k: int = DEFAULT_SIMILARITY_TOP_K,
filters: Optional[MetadataFilters] = None,
offset: int = 0,
output_metadata: list = [],
callback_manager: Optional[CallbackManager] = None,
) -> None:
self.search_top_k = search_top_k
if filters:
exprs = []
for fil in filters.filters:
expr = f"{fil.key} == '{fil.value}'"
exprs.append(expr)
self.filter = " && ".join(exprs)
else:
self.filter = ""
self.offset = offset
search_pipe_id = index.pipeline_ids.get("SEARCH")
self.search_pipeline_url = f"{index.domain}/{search_pipe_id}/run"
self.headers = index.headers
self.output_fields = output_metadata
super().__init__(callback_manager)
def _retrieve(self, query_bundle: QueryBundle) -> List[NodeWithScore]:
params = {
"data": {"query_text": query_bundle.query_str},
"params": {
"limit": self.search_top_k,
"offset": self.offset,
"outputFields": ["chunk_text", *self.output_fields],
"filter": self.filter,
},
}
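        # Run the search pipeline via Zilliz Cloud's REST endpoint; HTTP-level and
        # pipeline-level failures are surfaced as RuntimeError below.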
response = requests.post(
self.search_pipeline_url, headers=self.headers, json=params
)
if response.status_code != 200:
raise RuntimeError(response.text)
response_dict = response.json()
if response_dict["code"] != 200:
raise RuntimeError(response_dict)
response_data = response_dict["data"]
top_nodes = []
for search_res in response_data["result"]:
text = search_res.pop("chunk_text")
entity_id = search_res.pop("id")
distance = search_res.pop("distance")
node = NodeWithScore(
node=TextNode(text=text, id_=entity_id, metadata=search_res),
score=distance,
)
top_nodes.append(node)
return top_nodes | [
"llama_index.schema.TextNode"
] | [((511, 538), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (528, 538), False, 'import logging\n'), ((1978, 2052), 'requests.post', 'requests.post', (['self.search_pipeline_url'], {'headers': 'self.headers', 'json': 'params'}), '(self.search_pipeline_url, headers=self.headers, json=params)\n', (1991, 2052), False, 'import requests\n'), ((2607, 2662), 'llama_index.schema.TextNode', 'TextNode', ([], {'text': 'text', 'id_': 'entity_id', 'metadata': 'search_res'}), '(text=text, id_=entity_id, metadata=search_res)\n', (2615, 2662), False, 'from llama_index.schema import NodeWithScore, QueryBundle, TextNode\n')] |
import sys
import os
# this is needed to import classes from the API. it will be removed when the worker is refactored
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '../')))
import logging
import json
import pika
import time
import fitz
from pathlib import Path
from shared.job_status import JobStatus
from shared.batch_status import BatchStatus
from services.database.database import safe_db_operation
from models.batch import Batch
from services.database import batch_service, job_service
from services.minio.minio_service import create_minio_client
from docx import Document
from io import BytesIO
from llama_index import download_loader
from shared.vectorflow_request import VectorflowRequest
from services.rabbitmq.rabbit_service import create_connection_params
from pika.exceptions import AMQPConnectionError
logging.basicConfig(filename='./extract-log.txt', level=logging.INFO)
logging.basicConfig(filename='./extract-error-log.txt', level=logging.ERROR)
def extract_file(filename, vectorflow_request_dict, job_id):
safe_db_operation(job_service.update_job_status, job_id, JobStatus.CREATING_BATCHES)
logging.info("Extracting VectorflowRequest from dictionary")
vectorflow_request = VectorflowRequest._from_dict(vectorflow_request_dict)
logging.info(f"Extracting file {filename} from Minio")
minio_client = create_minio_client()
minio_client.fget_object(os.getenv('MINIO_BUCKET'), filename, filename)
logging.info(f"Processing file {filename}")
try:
batch_count = process_file_from_disk(filename, vectorflow_request, job_id)
except Exception as e:
logging.error('Error processing file: %s', e)
safe_db_operation(job_service.update_job_status, job_id, JobStatus.FAILED)
# TODO: remove this when retry is implemented
remove_from_minio(filename)
os.remove(filename)
return
if not batch_count:
safe_db_operation(job_service.update_job_status, job_id, JobStatus.FAILED)
logging.error(f"No batches created for job {job_id}")
# TODO: remove this when retry is implemented
remove_from_minio(filename)
return
remove_from_minio(filename)
os.remove(filename)
logging.info("File removed from Minio and local storage")
logging.info(f"Created {batch_count} batches")
def process_file_from_disk(file_path, vectorflow_request, job_id):
with open(file_path, 'rb') as f:
content = f.read()
filename = Path(file_path).name
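    # Dispatch on file extension: txt, docx, md (via llama_index's MarkdownReader),
    # html, and pdf (via PyMuPDF/fitz) are supported.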
if filename.endswith('.txt'):
file_content = content.decode('utf-8')
elif filename.endswith('.docx'):
doc = Document(file_path)
file_content = "\n".join([paragraph.text for paragraph in doc.paragraphs])
elif filename.endswith('.md'):
temp_file_path = Path('./temp_file.md')
with open(temp_file_path, 'wb') as temp_file:
temp_file.write(content)
MarkdownReader = download_loader("MarkdownReader")
loader = MarkdownReader()
documents = loader.load_data(file=temp_file_path)
file_content = "\n".join([document.text for document in documents])
temp_file_path.unlink()
elif filename.endswith('.html'):
file_content = repr(content.decode('utf-8'))
else:
with fitz.open(stream=BytesIO(content), filetype='pdf') as doc:
file_content = ""
for page in doc:
file_content += page.get_text()
batch_count = create_batches(file_content, job_id, vectorflow_request)
return batch_count
def create_batches(file_content, job_id, vectorflow_request):
logging.info("Creating batches")
chunks = [chunk for chunk in split_file(file_content, vectorflow_request.lines_per_batch)]
batches = [Batch(job_id=job_id, embeddings_metadata=vectorflow_request.embeddings_metadata, vector_db_metadata=vectorflow_request.vector_db_metadata) for _ in chunks]
batches = safe_db_operation(batch_service.create_batches, batches)
job = safe_db_operation(job_service.update_job_total_batches, job_id, len(batches))
for batch, chunk in zip(batches, chunks):
try:
logging.info("Publishing message to RabbitMQ")
data = (batch.id, chunk, vectorflow_request.vector_db_key, vectorflow_request.embedding_api_key)
json_data = json.dumps(data)
publish_channel.basic_publish(exchange='',
routing_key=os.getenv('EMBEDDING_QUEUE'),
body=json_data)
logging.info("Message published successfully")
except Exception as e:
logging.error('Error publishing message to RabbitMQ: %s', e)
# TODO: Add retry
            update_batch_and_job_status(job_id, BatchStatus.FAILED, batch.id) # TODO: add batch failure logic here
return job.total_batches if job else None
def split_file(file_content, lines_per_chunk=1000):
lines = file_content.splitlines()
for i in range(0, len(lines), lines_per_chunk):
yield lines[i:i+lines_per_chunk]
def remove_from_minio(filename):
client = create_minio_client()
client.remove_object(os.getenv("MINIO_BUCKET"), filename)
# TODO: refactor into utils
def update_batch_and_job_status(job_id, batch_status, batch_id):
try:
        if not job_id and batch_id:
            batch = safe_db_operation(batch_service.get_batch, batch_id)
            job_id = batch.job_id
updated_batch_status = safe_db_operation(batch_service.update_batch_status, batch_id, batch_status)
job = safe_db_operation(job_service.update_job_with_batch, job_id, updated_batch_status)
if job.job_status == JobStatus.COMPLETED:
logging.info(f"Job {job_id} completed successfully")
elif job.job_status == JobStatus.PARTIALLY_COMPLETED:
logging.info(f"Job {job_id} partially completed. {job.batches_succeeded} out of {job.total_batches} batches succeeded")
elif job.job_status == JobStatus.FAILED:
logging.info(f"Job {job_id} failed. {job.batches_succeeded} out of {job.total_batches} batches succeeded")
except Exception as e:
logging.error('Error updating job and batch status: %s', e)
safe_db_operation(job_service.update_job_status, job_id, JobStatus.FAILED)
####################
## RabbitMQ Logic ##
####################
def callback(ch, method, properties, body):
# do these outside the try-catch so it can update the batch status if there's an error
# if this parsing logic fails, the batch shouldn't be marked as failed
data = json.loads(body)
job_id, filename, vectorflow_request = data
try:
logging.info("Batch retrieved successfully")
extract_file(filename, vectorflow_request, job_id)
logging.info("Batch processed successfully")
except Exception as e:
logging.error('Error processing batch: %s', e)
safe_db_operation(job_service.update_job_status, job_id, JobStatus.FAILED)
ch.basic_ack(delivery_tag=method.delivery_tag)
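# One blocking connection carries two channels: consume_channel pulls extraction
# jobs off EXTRACTION_QUEUE, while publish_channel pushes per-batch messages onto
# EMBEDDING_QUEUE for the embedding workers.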
def start_connection(max_retries=5, retry_delay=5):
global publish_channel
    global connection
    connection = None  # so the finally block can safely test the handle on the first attempt
    for attempt in range(max_retries):
try:
connection_params = create_connection_params()
connection = pika.BlockingConnection(connection_params)
consume_channel = connection.channel()
publish_channel = connection.channel()
consume_queue_name = os.getenv('EXTRACTION_QUEUE')
publish_queue_name = os.getenv('EMBEDDING_QUEUE')
consume_channel.queue_declare(queue=consume_queue_name)
publish_channel.queue_declare(queue=publish_queue_name)
consume_channel.basic_consume(queue=consume_queue_name, on_message_callback=callback)
logging.info('Waiting for messages.')
consume_channel.start_consuming()
return # If successful, exit the function
except AMQPConnectionError as e:
logging.error('AMQP Connection Error: %s', e)
except Exception as e:
logging.error('Unexpected error: %s', e)
finally:
if connection and not connection.is_closed:
connection.close()
logging.info('Retrying to connect in %s seconds (Attempt %s/%s)', retry_delay, attempt + 1, max_retries)
time.sleep(retry_delay)
raise Exception('Failed to connect after {} attempts'.format(max_retries))
if __name__ == "__main__":
while True:
try:
start_connection()
except Exception as e:
logging.error('Error in start_connection: %s', e)
logging.info('Restarting start_connection after encountering an error.')
time.sleep(10) | [
"llama_index.download_loader"
] | [((847, 916), 'logging.basicConfig', 'logging.basicConfig', ([], {'filename': '"""./extract-log.txt"""', 'level': 'logging.INFO'}), "(filename='./extract-log.txt', level=logging.INFO)\n", (866, 916), False, 'import logging\n'), ((917, 993), 'logging.basicConfig', 'logging.basicConfig', ([], {'filename': '"""./extract-error-log.txt"""', 'level': 'logging.ERROR'}), "(filename='./extract-error-log.txt', level=logging.ERROR)\n", (936, 993), False, 'import logging\n'), ((1060, 1149), 'services.database.database.safe_db_operation', 'safe_db_operation', (['job_service.update_job_status', 'job_id', 'JobStatus.CREATING_BATCHES'], {}), '(job_service.update_job_status, job_id, JobStatus.\n CREATING_BATCHES)\n', (1077, 1149), False, 'from services.database.database import safe_db_operation\n'), ((1150, 1210), 'logging.info', 'logging.info', (['"""Extracting VectorflowRequest from dictionary"""'], {}), "('Extracting VectorflowRequest from dictionary')\n", (1162, 1210), False, 'import logging\n'), ((1236, 1289), 'shared.vectorflow_request.VectorflowRequest._from_dict', 'VectorflowRequest._from_dict', (['vectorflow_request_dict'], {}), '(vectorflow_request_dict)\n', (1264, 1289), False, 'from shared.vectorflow_request import VectorflowRequest\n'), ((1295, 1349), 'logging.info', 'logging.info', (['f"""Extracting file {filename} from Minio"""'], {}), "(f'Extracting file {filename} from Minio')\n", (1307, 1349), False, 'import logging\n'), ((1369, 1390), 'services.minio.minio_service.create_minio_client', 'create_minio_client', ([], {}), '()\n', (1388, 1390), False, 'from services.minio.minio_service import create_minio_client\n'), ((1472, 1515), 'logging.info', 'logging.info', (['f"""Processing file {filename}"""'], {}), "(f'Processing file {filename}')\n", (1484, 1515), False, 'import logging\n'), ((2219, 2238), 'os.remove', 'os.remove', (['filename'], {}), '(filename)\n', (2228, 2238), False, 'import os\n'), ((2244, 2301), 'logging.info', 'logging.info', (['"""File removed from Minio and local storage"""'], {}), "('File removed from Minio and local storage')\n", (2256, 2301), False, 'import logging\n'), ((2306, 2352), 'logging.info', 'logging.info', (['f"""Created {batch_count} batches"""'], {}), "(f'Created {batch_count} batches')\n", (2318, 2352), False, 'import logging\n'), ((3662, 3694), 'logging.info', 'logging.info', (['"""Creating batches"""'], {}), "('Creating batches')\n", (3674, 3694), False, 'import logging\n'), ((3980, 4036), 'services.database.database.safe_db_operation', 'safe_db_operation', (['batch_service.create_batches', 'batches'], {}), '(batch_service.create_batches, batches)\n', (3997, 4036), False, 'from services.database.database import safe_db_operation\n'), ((5171, 5192), 'services.minio.minio_service.create_minio_client', 'create_minio_client', ([], {}), '()\n', (5190, 5192), False, 'from services.minio.minio_service import create_minio_client\n'), ((6660, 6676), 'json.loads', 'json.loads', (['body'], {}), '(body)\n', (6670, 6676), False, 'import json\n'), ((1420, 1445), 'os.getenv', 'os.getenv', (['"""MINIO_BUCKET"""'], {}), "('MINIO_BUCKET')\n", (1429, 1445), False, 'import os\n'), ((1939, 2013), 'services.database.database.safe_db_operation', 'safe_db_operation', (['job_service.update_job_status', 'job_id', 'JobStatus.FAILED'], {}), '(job_service.update_job_status, job_id, JobStatus.FAILED)\n', (1956, 2013), False, 'from services.database.database import safe_db_operation\n'), ((2022, 2075), 'logging.error', 'logging.error', (['f"""No batches created for job 
{job_id}"""'], {}), "(f'No batches created for job {job_id}')\n", (2035, 2075), False, 'import logging\n'), ((2501, 2516), 'pathlib.Path', 'Path', (['file_path'], {}), '(file_path)\n', (2505, 2516), False, 'from pathlib import Path\n'), ((3810, 3958), 'models.batch.Batch', 'Batch', ([], {'job_id': 'job_id', 'embeddings_metadata': 'vectorflow_request.embeddings_metadata', 'vector_db_metadata': 'vectorflow_request.vector_db_metadata'}), '(job_id=job_id, embeddings_metadata=vectorflow_request.\n embeddings_metadata, vector_db_metadata=vectorflow_request.\n vector_db_metadata)\n', (3815, 3958), False, 'from models.batch import Batch\n'), ((5218, 5243), 'os.getenv', 'os.getenv', (['"""MINIO_BUCKET"""'], {}), "('MINIO_BUCKET')\n", (5227, 5243), False, 'import os\n'), ((5528, 5604), 'services.database.database.safe_db_operation', 'safe_db_operation', (['batch_service.update_batch_status', 'batch_id', 'batch_status'], {}), '(batch_service.update_batch_status, batch_id, batch_status)\n', (5545, 5604), False, 'from services.database.database import safe_db_operation\n'), ((5619, 5705), 'services.database.database.safe_db_operation', 'safe_db_operation', (['job_service.update_job_with_batch', 'job_id', 'updated_batch_status'], {}), '(job_service.update_job_with_batch, job_id,\n updated_batch_status)\n', (5636, 5705), False, 'from services.database.database import safe_db_operation\n'), ((6747, 6791), 'logging.info', 'logging.info', (['"""Batch retrieved successfully"""'], {}), "('Batch retrieved successfully')\n", (6759, 6791), False, 'import logging\n'), ((6859, 6903), 'logging.info', 'logging.info', (['"""Batch processed successfully"""'], {}), "('Batch processed successfully')\n", (6871, 6903), False, 'import logging\n'), ((8320, 8428), 'logging.info', 'logging.info', (['"""Retrying to connect in %s seconds (Attempt %s/%s)"""', 'retry_delay', '(attempt + 1)', 'max_retries'], {}), "('Retrying to connect in %s seconds (Attempt %s/%s)',\n retry_delay, attempt + 1, max_retries)\n", (8332, 8428), False, 'import logging\n'), ((8433, 8456), 'time.sleep', 'time.sleep', (['retry_delay'], {}), '(retry_delay)\n', (8443, 8456), False, 'import time\n'), ((168, 193), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (183, 193), False, 'import os\n'), ((1643, 1688), 'logging.error', 'logging.error', (['"""Error processing file: %s"""', 'e'], {}), "('Error processing file: %s', e)\n", (1656, 1688), False, 'import logging\n'), ((1697, 1771), 'services.database.database.safe_db_operation', 'safe_db_operation', (['job_service.update_job_status', 'job_id', 'JobStatus.FAILED'], {}), '(job_service.update_job_status, job_id, JobStatus.FAILED)\n', (1714, 1771), False, 'from services.database.database import safe_db_operation\n'), ((1871, 1890), 'os.remove', 'os.remove', (['filename'], {}), '(filename)\n', (1880, 1890), False, 'import os\n'), ((2660, 2679), 'docx.Document', 'Document', (['file_path'], {}), '(file_path)\n', (2668, 2679), False, 'from docx import Document\n'), ((4198, 4244), 'logging.info', 'logging.info', (['"""Publishing message to RabbitMQ"""'], {}), "('Publishing message to RabbitMQ')\n", (4210, 4244), False, 'import logging\n'), ((4378, 4394), 'json.dumps', 'json.dumps', (['data'], {}), '(data)\n', (4388, 4394), False, 'import json\n'), ((4600, 4646), 'logging.info', 'logging.info', (['"""Message published successfully"""'], {}), "('Message published successfully')\n", (4612, 4646), False, 'import logging\n'), ((5412, 5464), 'services.database.database.safe_db_operation', 
'safe_db_operation', (['batch_service.get_batch', 'batch_id'], {}), '(batch_service.get_batch, batch_id)\n', (5429, 5464), False, 'from services.database.database import safe_db_operation\n'), ((5764, 5816), 'logging.info', 'logging.info', (['f"""Job {job_id} completed successfully"""'], {}), "(f'Job {job_id} completed successfully')\n", (5776, 5816), False, 'import logging\n'), ((6231, 6290), 'logging.error', 'logging.error', (['"""Error updating job and batch status: %s"""', 'e'], {}), "('Error updating job and batch status: %s', e)\n", (6244, 6290), False, 'import logging\n'), ((6299, 6373), 'services.database.database.safe_db_operation', 'safe_db_operation', (['job_service.update_job_status', 'job_id', 'JobStatus.FAILED'], {}), '(job_service.update_job_status, job_id, JobStatus.FAILED)\n', (6316, 6373), False, 'from services.database.database import safe_db_operation\n'), ((6939, 6985), 'logging.error', 'logging.error', (['"""Error processing batch: %s"""', 'e'], {}), "('Error processing batch: %s', e)\n", (6952, 6985), False, 'import logging\n'), ((6994, 7068), 'services.database.database.safe_db_operation', 'safe_db_operation', (['job_service.update_job_status', 'job_id', 'JobStatus.FAILED'], {}), '(job_service.update_job_status, job_id, JobStatus.FAILED)\n', (7011, 7068), False, 'from services.database.database import safe_db_operation\n'), ((7308, 7334), 'services.rabbitmq.rabbit_service.create_connection_params', 'create_connection_params', ([], {}), '()\n', (7332, 7334), False, 'from services.rabbitmq.rabbit_service import create_connection_params\n'), ((7360, 7402), 'pika.BlockingConnection', 'pika.BlockingConnection', (['connection_params'], {}), '(connection_params)\n', (7383, 7402), False, 'import pika\n'), ((7539, 7568), 'os.getenv', 'os.getenv', (['"""EXTRACTION_QUEUE"""'], {}), "('EXTRACTION_QUEUE')\n", (7548, 7568), False, 'import os\n'), ((7602, 7630), 'os.getenv', 'os.getenv', (['"""EMBEDDING_QUEUE"""'], {}), "('EMBEDDING_QUEUE')\n", (7611, 7630), False, 'import os\n'), ((7880, 7917), 'logging.info', 'logging.info', (['"""Waiting for messages."""'], {}), "('Waiting for messages.')\n", (7892, 7917), False, 'import logging\n'), ((2824, 2846), 'pathlib.Path', 'Path', (['"""./temp_file.md"""'], {}), "('./temp_file.md')\n", (2828, 2846), False, 'from pathlib import Path\n'), ((2976, 3009), 'llama_index.download_loader', 'download_loader', (['"""MarkdownReader"""'], {}), "('MarkdownReader')\n", (2991, 3009), False, 'from llama_index import download_loader\n'), ((4690, 4750), 'logging.error', 'logging.error', (['"""Error publishing message to RabbitMQ: %s"""', 'e'], {}), "('Error publishing message to RabbitMQ: %s', e)\n", (4703, 4750), False, 'import logging\n'), ((5891, 6020), 'logging.info', 'logging.info', (['f"""Job {job_id} partially completed. {job.batches_succeeded} out of {job.total_batches} batches succeeded"""'], {}), "(\n f'Job {job_id} partially completed. 
{job.batches_succeeded} out of {job.total_batches} batches succeeded'\n )\n", (5903, 6020), False, 'import logging\n'), ((8073, 8118), 'logging.error', 'logging.error', (['"""AMQP Connection Error: %s"""', 'e'], {}), "('AMQP Connection Error: %s', e)\n", (8086, 8118), False, 'import logging\n'), ((8162, 8202), 'logging.error', 'logging.error', (['"""Unexpected error: %s"""', 'e'], {}), "('Unexpected error: %s', e)\n", (8175, 8202), False, 'import logging\n'), ((8669, 8718), 'logging.error', 'logging.error', (['"""Error in start_connection: %s"""', 'e'], {}), "('Error in start_connection: %s', e)\n", (8682, 8718), False, 'import logging\n'), ((8731, 8803), 'logging.info', 'logging.info', (['"""Restarting start_connection after encountering an error."""'], {}), "('Restarting start_connection after encountering an error.')\n", (8743, 8803), False, 'import logging\n'), ((8816, 8830), 'time.sleep', 'time.sleep', (['(10)'], {}), '(10)\n', (8826, 8830), False, 'import time\n'), ((4502, 4530), 'os.getenv', 'os.getenv', (['"""EMBEDDING_QUEUE"""'], {}), "('EMBEDDING_QUEUE')\n", (4511, 4530), False, 'import os\n'), ((6072, 6188), 'logging.info', 'logging.info', (['f"""Job {job_id} failed. {job.batches_succeeded} out of {job.total_batches} batches succeeded"""'], {}), "(\n f'Job {job_id} failed. {job.batches_succeeded} out of {job.total_batches} batches succeeded'\n )\n", (6084, 6188), False, 'import logging\n'), ((3347, 3363), 'io.BytesIO', 'BytesIO', (['content'], {}), '(content)\n', (3354, 3363), False, 'from io import BytesIO\n')] |
import os
import json
import time
from typing import List
import faiss
import pypdf
import random
import itertools
import text_utils
import pandas as pd
import altair as alt
import streamlit as st
from io import StringIO
from llama_index import Document
from langchain.llms import Anthropic
from langchain.chains import RetrievalQA
from langchain.vectorstores import FAISS
from llama_index import LangchainEmbedding
from langchain.chat_models import ChatOpenAI
from langchain.retrievers import SVMRetriever
from langchain.chains import QAGenerationChain
from langchain.retrievers import TFIDFRetriever
from langchain.evaluation.qa import QAEvalChain
from langchain.embeddings import HuggingFaceEmbeddings
from langchain.embeddings.openai import OpenAIEmbeddings
from gpt_index import LLMPredictor, ServiceContext, GPTFaissIndex
from langchain.text_splitter import RecursiveCharacterTextSplitter, CharacterTextSplitter
from text_utils import GRADE_DOCS_PROMPT, GRADE_ANSWER_PROMPT, GRADE_DOCS_PROMPT_FAST, GRADE_ANSWER_PROMPT_FAST, GRADE_ANSWER_PROMPT_BIAS_CHECK, GRADE_ANSWER_PROMPT_OPENAI
# Keep dataframe in memory to accumulate experimental results
if "existing_df" not in st.session_state:
summary = pd.DataFrame(columns=['chunk_chars',
'overlap',
'split',
'model',
'retriever',
'embedding',
'num_neighbors',
'Latency',
'Retrieval score',
'Answer score'])
st.session_state.existing_df = summary
else:
summary = st.session_state.existing_df
@st.cache_data
def load_docs(files: List) -> str:
"""
Load docs from files
@param files: list of files to load
@return: string of all docs concatenated
"""
st.info("`Reading doc ...`")
all_text = ""
for file_path in files:
file_extension = os.path.splitext(file_path.name)[1]
if file_extension == ".pdf":
pdf_reader = pypdf.PdfReader(file_path)
file_content = ""
for page in pdf_reader.pages:
file_content += page.extract_text()
file_content = text_utils.clean_pdf_text(file_content)
all_text += file_content
elif file_extension == ".txt":
stringio = StringIO(file_path.getvalue().decode("utf-8"))
file_content = stringio.read()
all_text += file_content
else:
st.warning('Please provide txt or pdf.', icon="⚠️")
return all_text
@st.cache_data
def generate_eval(text: str, num_questions: int, chunk: int):
"""
Generate eval set
@param text: text to generate eval set from
@param num_questions: number of questions to generate
@param chunk: chunk size to draw question from in the doc
@return: eval set as JSON list
"""
st.info("`Generating eval set ...`")
n = len(text)
starting_indices = [random.randint(0, n - chunk) for _ in range(num_questions)]
sub_sequences = [text[i:i + chunk] for i in starting_indices]
chain = QAGenerationChain.from_llm(ChatOpenAI(temperature=0))
eval_set = []
for i, b in enumerate(sub_sequences):
try:
qa = chain.run(b)
eval_set.append(qa)
except:
st.warning('Error generating question %s.' % str(i + 1), icon="⚠️")
eval_set_full = list(itertools.chain.from_iterable(eval_set))
return eval_set_full
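# Illustrative sketch (hypothetical values): each entry of the eval set produced above
# is a question/answer dict, matching the keys consumed later by run_evaluation, e.g.
# example_eval_set = [
#     {"question": "What topic does the document cover?", "answer": "..."},
# ]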
@st.cache_resource
def split_texts(text, chunk_size: int, overlap, split_method: str):
"""
Split text into chunks
@param text: text to split
@param chunk_size:
@param overlap:
@param split_method:
@return: list of str splits
"""
st.info("`Splitting doc ...`")
if split_method == "RecursiveTextSplitter":
text_splitter = RecursiveCharacterTextSplitter(chunk_size=chunk_size,
chunk_overlap=overlap)
elif split_method == "CharacterTextSplitter":
text_splitter = CharacterTextSplitter(separator=" ",
chunk_size=chunk_size,
chunk_overlap=overlap)
else:
st.warning("`Split method not recognized. Using RecursiveCharacterTextSplitter`", icon="⚠️")
text_splitter = RecursiveCharacterTextSplitter(chunk_size=chunk_size,
chunk_overlap=overlap)
split_text = text_splitter.split_text(text)
return split_text
@st.cache_resource
def make_llm(model_version: str):
"""
Make LLM from model version
@param model_version: model_version
    @return: LLM
"""
if (model_version == "gpt-3.5-turbo") or (model_version == "gpt-4"):
chosen_model = ChatOpenAI(model_name=model_version, temperature=0)
elif model_version == "anthropic":
chosen_model = Anthropic(temperature=0)
else:
st.warning("`Model version not recognized. Using gpt-3.5-turbo`", icon="⚠️")
chosen_model = ChatOpenAI(model_name="gpt-3.5-turbo", temperature=0)
return chosen_model
@st.cache_resource
def make_retriever(splits, retriever_type, embedding_type, num_neighbors, _llm):
"""
Make document retriever
@param splits: list of str splits
@param retriever_type: retriever type
@param embedding_type: embedding type
@param num_neighbors: number of neighbors for retrieval
@param _llm: model
@return: retriever
"""
st.info("`Making retriever ...`")
# Set embeddings
if embedding_type == "OpenAI":
embedding = OpenAIEmbeddings()
elif embedding_type == "HuggingFace":
embedding = HuggingFaceEmbeddings()
else:
st.warning("`Embedding type not recognized. Using OpenAI`", icon="⚠️")
embedding = OpenAIEmbeddings()
# Select retriever
if retriever_type == "similarity-search":
try:
vector_store = FAISS.from_texts(splits, embedding)
except ValueError:
st.warning("`Error using OpenAI embeddings (disallowed TikToken token in the text). Using HuggingFace.`",
icon="⚠️")
vector_store = FAISS.from_texts(splits, HuggingFaceEmbeddings())
retriever_obj = vector_store.as_retriever(k=num_neighbors)
elif retriever_type == "SVM":
retriever_obj = SVMRetriever.from_texts(splits, embedding)
elif retriever_type == "TF-IDF":
retriever_obj = TFIDFRetriever.from_texts(splits)
elif retriever_type == "Llama-Index":
documents = [Document(t, LangchainEmbedding(embedding)) for t in splits]
llm_predictor = LLMPredictor(llm)
context = ServiceContext.from_defaults(chunk_size_limit=512, llm_predictor=llm_predictor)
d = 1536
faiss_index = faiss.IndexFlatL2(d)
retriever_obj = GPTFaissIndex.from_documents(documents, faiss_index=faiss_index, service_context=context)
else:
st.warning("`Retriever type not recognized. Using SVM`", icon="⚠️")
retriever_obj = SVMRetriever.from_texts(splits, embedding)
return retriever_obj
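# Note: for the "Llama-Index" option the object returned above is a GPTFaissIndex
# rather than a LangChain retriever; make_chain and run_evaluation below special-case
# it and call .query(...) on it directly instead of wrapping it in a RetrievalQA chain.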
def make_chain(llm, retriever, retriever_type: str) -> RetrievalQA:
"""
Make chain
@param llm: model
@param retriever: retriever
@param retriever_type: retriever type
@return: chain (or return retriever for Llama-Index)
"""
st.info("`Making chain ...`")
if retriever_type == "Llama-Index":
qa = retriever
else:
qa = RetrievalQA.from_chain_type(llm,
chain_type="stuff",
retriever=retriever,
input_key="question")
return qa
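# Illustrative usage sketch (hypothetical question text): for the non-Llama-Index path
# the returned RetrievalQA chain is called with a dict keyed by "question" and exposes
# its answer under "result", which is how run_evaluation consumes it below, e.g.
# prediction = qa_chain({"question": "What is the document about?"})
# answer_text = prediction["result"]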
def grade_model_answer(predicted_dataset: List, predictions: List, grade_answer_prompt: str) -> List:
"""
Grades the distilled answer based on ground truth and model predictions.
@param predicted_dataset: A list of dictionaries containing ground truth questions and answers.
@param predictions: A list of dictionaries containing model predictions for the questions.
@param grade_answer_prompt: The prompt level for the grading. Either "Fast" or "Full".
@return: A list of scores for the distilled answers.
"""
# Grade the distilled answer
st.info("`Grading model answer ...`")
# Set the grading prompt based on the grade_answer_prompt parameter
if grade_answer_prompt == "Fast":
prompt = GRADE_ANSWER_PROMPT_FAST
elif grade_answer_prompt == "Descriptive w/ bias check":
prompt = GRADE_ANSWER_PROMPT_BIAS_CHECK
elif grade_answer_prompt == "OpenAI grading prompt":
prompt = GRADE_ANSWER_PROMPT_OPENAI
else:
prompt = GRADE_ANSWER_PROMPT
# Create an evaluation chain
eval_chain = QAEvalChain.from_llm(
llm=ChatOpenAI(model_name="gpt-3.5-turbo", temperature=0),
prompt=prompt
)
# Evaluate the predictions and ground truth using the evaluation chain
graded_outputs = eval_chain.evaluate(
predicted_dataset,
predictions,
question_key="question",
prediction_key="result"
)
return graded_outputs
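# Illustrative sketch (hypothetical values): QAEvalChain.evaluate returns one dict per
# example whose "text" field holds the grader's verdict; the results table below reads
# it via g['text'] and checks for the substring "INCORRECT", e.g.
# graded_outputs = [{"text": "CORRECT"}, {"text": "INCORRECT: missing a key detail"}]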
def grade_model_retrieval(gt_dataset: List, predictions: List, grade_docs_prompt: str):
"""
Grades the relevance of retrieved documents based on ground truth and model predictions.
@param gt_dataset: list of dictionaries containing ground truth questions and answers.
@param predictions: list of dictionaries containing model predictions for the questions
@param grade_docs_prompt: prompt level for the grading. Either "Fast" or "Full"
@return: list of scores for the retrieved documents.
"""
# Grade the docs retrieval
st.info("`Grading relevance of retrieved docs ...`")
# Set the grading prompt based on the grade_docs_prompt parameter
prompt = GRADE_DOCS_PROMPT_FAST if grade_docs_prompt == "Fast" else GRADE_DOCS_PROMPT
# Create an evaluation chain
eval_chain = QAEvalChain.from_llm(
llm=ChatOpenAI(model_name="gpt-3.5-turbo", temperature=0),
prompt=prompt
)
# Evaluate the predictions and ground truth using the evaluation chain
graded_outputs = eval_chain.evaluate(
gt_dataset,
predictions,
question_key="question",
prediction_key="result"
)
return graded_outputs
def run_evaluation(chain, retriever, eval_set, grade_prompt, retriever_type, num_neighbors):
"""
Runs evaluation on a model's performance on a given evaluation dataset.
@param chain: Model chain used for answering questions
@param retriever: Document retriever used for retrieving relevant documents
@param eval_set: List of dictionaries containing questions and corresponding ground truth answers
@param grade_prompt: String prompt used for grading model's performance
@param retriever_type: String specifying the type of retriever used
@param num_neighbors: Number of neighbors to retrieve using the retriever
@return: A tuple of four items:
- answers_grade: A dictionary containing scores for the model's answers.
- retrieval_grade: A dictionary containing scores for the model's document retrieval.
- latencies_list: A list of latencies in seconds for each question answered.
- predictions_list: A list of dictionaries containing the model's predicted answers and relevant documents for each question.
"""
st.info("`Running evaluation ...`")
predictions_list = []
retrieved_docs = []
gt_dataset = []
latencies_list = []
for data in eval_set:
# Get answer and log latency
start_time = time.time()
if retriever_type != "Llama-Index":
predictions_list.append(chain(data))
elif retriever_type == "Llama-Index":
answer = chain.query(data["question"], similarity_top_k=num_neighbors, response_mode="tree_summarize",
use_async=True)
predictions_list.append({"question": data["question"], "answer": data["answer"], "result": answer.response})
gt_dataset.append(data)
end_time = time.time()
elapsed_time = end_time - start_time
latencies_list.append(elapsed_time)
# Retrieve docs
retrieved_doc_text = ""
if retriever_type == "Llama-Index":
for i, doc in enumerate(answer.source_nodes):
retrieved_doc_text += "Doc %s: " % str(i + 1) + doc.node.text + " "
else:
docs = retriever.get_relevant_documents(data["question"])
for i, doc in enumerate(docs):
retrieved_doc_text += "Doc %s: " % str(i + 1) + doc.page_content + " "
retrieved = {"question": data["question"], "answer": data["answer"], "result": retrieved_doc_text}
retrieved_docs.append(retrieved)
# Grade
answers_grade = grade_model_answer(gt_dataset, predictions_list, grade_prompt)
retrieval_grade = grade_model_retrieval(gt_dataset, retrieved_docs, grade_prompt)
return answers_grade, retrieval_grade, latencies_list, predictions_list
# Auth
st.sidebar.image("img/diagnostic.jpg")
with st.sidebar.form("user_input"):
num_eval_questions = st.select_slider("`Number of eval questions`",
options=[1, 5, 10, 15, 20], value=5)
chunk_chars = st.select_slider("`Choose chunk size for splitting`",
options=[500, 750, 1000, 1500, 2000], value=1000)
overlap = st.select_slider("`Choose overlap for splitting`",
options=[0, 50, 100, 150, 200], value=100)
split_method = st.radio("`Split method`",
("RecursiveTextSplitter",
"CharacterTextSplitter"),
index=0)
model = st.radio("`Choose model`",
("gpt-3.5-turbo",
"gpt-4",
"anthropic"),
index=0)
retriever_type = st.radio("`Choose retriever`",
("TF-IDF",
"SVM",
"Llama-Index",
"similarity-search"),
index=3)
num_neighbors = st.select_slider("`Choose # chunks to retrieve`",
options=[3, 4, 5, 6, 7, 8])
embeddings = st.radio("`Choose embeddings`",
("HuggingFace",
"OpenAI"),
index=1)
grade_prompt = st.radio("`Grading style prompt`",
("Fast",
"Descriptive",
"Descriptive w/ bias check",
"OpenAI grading prompt"),
index=0)
submitted = st.form_submit_button("Submit evaluation")
# App
st.header("`Auto-evaluator`")
st.info(
"`I am an evaluation tool for question-answering. Given documents, I will auto-generate a question-answer eval "
"set and evaluate using the selected chain settings. Experiments with different configurations are logged. "
"Optionally, provide your own eval set (as a JSON, see docs/karpathy-pod-eval.json for an example).`")
with st.form(key='file_inputs'):
uploaded_file = st.file_uploader("`Please upload a file to evaluate (.txt or .pdf):` ",
type=['pdf', 'txt'],
accept_multiple_files=True)
uploaded_eval_set = st.file_uploader("`[Optional] Please upload eval set (.json):` ",
type=['json'],
accept_multiple_files=False)
submitted = st.form_submit_button("Submit files")
if uploaded_file:
# Load docs
text = load_docs(uploaded_file)
# Generate num_eval_questions questions, each from context of 3k chars randomly selected
if not uploaded_eval_set:
eval_set = generate_eval(text, num_eval_questions, 3000)
else:
eval_set = json.loads(uploaded_eval_set.read())
# Split text
splits = split_texts(text, chunk_chars, overlap, split_method)
# Make LLM
llm = make_llm(model)
# Make vector DB
retriever = make_retriever(splits, retriever_type, embeddings, num_neighbors, llm)
# Make chain
qa_chain = make_chain(llm, retriever, retriever_type)
# Grade model
graded_answers, graded_retrieval, latency, predictions = run_evaluation(qa_chain, retriever, eval_set, grade_prompt,
retriever_type, num_neighbors)
# Assemble outputs
d = pd.DataFrame(predictions)
d['answer score'] = [g['text'] for g in graded_answers]
d['docs score'] = [g['text'] for g in graded_retrieval]
d['latency'] = latency
# Summary statistics
mean_latency = d['latency'].mean()
correct_answer_count = len([text for text in d['answer score'] if "INCORRECT" not in text])
correct_docs_count = len([text for text in d['docs score'] if "Context is relevant: True" in text])
percentage_answer = (correct_answer_count / len(graded_answers)) * 100
percentage_docs = (correct_docs_count / len(graded_retrieval)) * 100
st.subheader("`Run Results`")
st.info(
"`I will grade the chain based on: 1/ the relevance of the retrived documents relative to the question and 2/ "
"the summarized answer relative to the ground truth answer. You can see (and change) to prompts used for "
"grading in text_utils`")
st.dataframe(data=d, use_container_width=True)
# Accumulate results
st.subheader("`Aggregate Results`")
st.info(
"`Retrieval and answer scores are percentage of retrived documents deemed relevant by the LLM grader ("
"relative to the question) and percentage of summarized answers deemed relevant (relative to ground truth "
"answer), respectively. The size of point correponds to the latency (in seconds) of retrieval + answer "
"summarization (larger circle = slower).`")
new_row = pd.DataFrame({'chunk_chars': [chunk_chars],
'overlap': [overlap],
'split': [split_method],
'model': [model],
'retriever': [retriever_type],
'embedding': [embeddings],
'num_neighbors': [num_neighbors],
'Latency': [mean_latency],
'Retrieval score': [percentage_docs],
'Answer score': [percentage_answer]})
summary = pd.concat([summary, new_row], ignore_index=True)
st.dataframe(data=summary, use_container_width=True)
st.session_state.existing_df = summary
# Dataframe for visualization
show = summary.reset_index().copy()
show.columns = ['expt number', 'chunk_chars', 'overlap',
'split', 'model', 'retriever', 'embedding', 'num_neighbors', 'Latency', 'Retrieval score',
'Answer score']
show['expt number'] = show['expt number'].apply(lambda x: "Expt #: " + str(x + 1))
c = alt.Chart(show).mark_circle().encode(x='Retrieval score',
y='Answer score',
size=alt.Size('Latency'),
color='expt number',
tooltip=['expt number', 'Retrieval score', 'Latency', 'Answer score'])
st.altair_chart(c, use_container_width=True, theme="streamlit")
| [
"llama_index.LangchainEmbedding"
] | [((13312, 13350), 'streamlit.sidebar.image', 'st.sidebar.image', (['"""img/diagnostic.jpg"""'], {}), "('img/diagnostic.jpg')\n", (13328, 13350), True, 'import streamlit as st\n'), ((15130, 15159), 'streamlit.header', 'st.header', (['"""`Auto-evaluator`"""'], {}), "('`Auto-evaluator`')\n", (15139, 15159), True, 'import streamlit as st\n'), ((15160, 15496), 'streamlit.info', 'st.info', (['"""`I am an evaluation tool for question-answering. Given documents, I will auto-generate a question-answer eval set and evaluate using the selected chain settings. Experiments with different configurations are logged. Optionally, provide your own eval set (as a JSON, see docs/karpathy-pod-eval.json for an example).`"""'], {}), "(\n '`I am an evaluation tool for question-answering. Given documents, I will auto-generate a question-answer eval set and evaluate using the selected chain settings. Experiments with different configurations are logged. Optionally, provide your own eval set (as a JSON, see docs/karpathy-pod-eval.json for an example).`'\n )\n", (15167, 15496), True, 'import streamlit as st\n'), ((1209, 1372), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['chunk_chars', 'overlap', 'split', 'model', 'retriever', 'embedding',\n 'num_neighbors', 'Latency', 'Retrieval score', 'Answer score']"}), "(columns=['chunk_chars', 'overlap', 'split', 'model',\n 'retriever', 'embedding', 'num_neighbors', 'Latency', 'Retrieval score',\n 'Answer score'])\n", (1221, 1372), True, 'import pandas as pd\n'), ((1964, 1992), 'streamlit.info', 'st.info', (['"""`Reading doc ...`"""'], {}), "('`Reading doc ...`')\n", (1971, 1992), True, 'import streamlit as st\n'), ((3028, 3064), 'streamlit.info', 'st.info', (['"""`Generating eval set ...`"""'], {}), "('`Generating eval set ...`')\n", (3035, 3064), True, 'import streamlit as st\n'), ((3888, 3918), 'streamlit.info', 'st.info', (['"""`Splitting doc ...`"""'], {}), "('`Splitting doc ...`')\n", (3895, 3918), True, 'import streamlit as st\n'), ((5679, 5712), 'streamlit.info', 'st.info', (['"""`Making retriever ...`"""'], {}), "('`Making retriever ...`')\n", (5686, 5712), True, 'import streamlit as st\n'), ((7560, 7589), 'streamlit.info', 'st.info', (['"""`Making chain ...`"""'], {}), "('`Making chain ...`')\n", (7567, 7589), True, 'import streamlit as st\n'), ((8486, 8523), 'streamlit.info', 'st.info', (['"""`Grading model answer ...`"""'], {}), "('`Grading model answer ...`')\n", (8493, 8523), True, 'import streamlit as st\n'), ((9923, 9975), 'streamlit.info', 'st.info', (['"""`Grading relevance of retrieved docs ...`"""'], {}), "('`Grading relevance of retrieved docs ...`')\n", (9930, 9975), True, 'import streamlit as st\n'), ((11634, 11669), 'streamlit.info', 'st.info', (['"""`Running evaluation ...`"""'], {}), "('`Running evaluation ...`')\n", (11641, 11669), True, 'import streamlit as st\n'), ((13357, 13386), 'streamlit.sidebar.form', 'st.sidebar.form', (['"""user_input"""'], {}), "('user_input')\n", (13372, 13386), True, 'import streamlit as st\n'), ((13413, 13500), 'streamlit.select_slider', 'st.select_slider', (['"""`Number of eval questions`"""'], {'options': '[1, 5, 10, 15, 20]', 'value': '(5)'}), "('`Number of eval questions`', options=[1, 5, 10, 15, 20],\n value=5)\n", (13429, 13500), True, 'import streamlit as st\n'), ((13558, 13666), 'streamlit.select_slider', 'st.select_slider', (['"""`Choose chunk size for splitting`"""'], {'options': '[500, 750, 1000, 1500, 2000]', 'value': '(1000)'}), "('`Choose chunk size for splitting`', options=[500, 750, \n 1000, 
1500, 2000], value=1000)\n", (13574, 13666), True, 'import streamlit as st\n'), ((13712, 13809), 'streamlit.select_slider', 'st.select_slider', (['"""`Choose overlap for splitting`"""'], {'options': '[0, 50, 100, 150, 200]', 'value': '(100)'}), "('`Choose overlap for splitting`', options=[0, 50, 100, 150,\n 200], value=100)\n", (13728, 13809), True, 'import streamlit as st\n'), ((13857, 13948), 'streamlit.radio', 'st.radio', (['"""`Split method`"""', "('RecursiveTextSplitter', 'CharacterTextSplitter')"], {'index': '(0)'}), "('`Split method`', ('RecursiveTextSplitter',\n 'CharacterTextSplitter'), index=0)\n", (13865, 13948), True, 'import streamlit as st\n'), ((14043, 14119), 'streamlit.radio', 'st.radio', (['"""`Choose model`"""', "('gpt-3.5-turbo', 'gpt-4', 'anthropic')"], {'index': '(0)'}), "('`Choose model`', ('gpt-3.5-turbo', 'gpt-4', 'anthropic'), index=0)\n", (14051, 14119), True, 'import streamlit as st\n'), ((14228, 14326), 'streamlit.radio', 'st.radio', (['"""`Choose retriever`"""', "('TF-IDF', 'SVM', 'Llama-Index', 'similarity-search')"], {'index': '(3)'}), "('`Choose retriever`', ('TF-IDF', 'SVM', 'Llama-Index',\n 'similarity-search'), index=3)\n", (14236, 14326), True, 'import streamlit as st\n'), ((14497, 14574), 'streamlit.select_slider', 'st.select_slider', (['"""`Choose # chunks to retrieve`"""'], {'options': '[3, 4, 5, 6, 7, 8]'}), "('`Choose # chunks to retrieve`', options=[3, 4, 5, 6, 7, 8])\n", (14513, 14574), True, 'import streamlit as st\n'), ((14630, 14697), 'streamlit.radio', 'st.radio', (['"""`Choose embeddings`"""', "('HuggingFace', 'OpenAI')"], {'index': '(1)'}), "('`Choose embeddings`', ('HuggingFace', 'OpenAI'), index=1)\n", (14638, 14697), True, 'import streamlit as st\n'), ((14797, 14923), 'streamlit.radio', 'st.radio', (['"""`Grading style prompt`"""', "('Fast', 'Descriptive', 'Descriptive w/ bias check', 'OpenAI grading prompt')"], {'index': '(0)'}), "('`Grading style prompt`', ('Fast', 'Descriptive',\n 'Descriptive w/ bias check', 'OpenAI grading prompt'), index=0)\n", (14805, 14923), True, 'import streamlit as st\n'), ((15080, 15122), 'streamlit.form_submit_button', 'st.form_submit_button', (['"""Submit evaluation"""'], {}), "('Submit evaluation')\n", (15101, 15122), True, 'import streamlit as st\n'), ((15512, 15538), 'streamlit.form', 'st.form', ([], {'key': '"""file_inputs"""'}), "(key='file_inputs')\n", (15519, 15538), True, 'import streamlit as st\n'), ((15560, 15684), 'streamlit.file_uploader', 'st.file_uploader', (['"""`Please upload a file to evaluate (.txt or .pdf):` """'], {'type': "['pdf', 'txt']", 'accept_multiple_files': '(True)'}), "('`Please upload a file to evaluate (.txt or .pdf):` ',\n type=['pdf', 'txt'], accept_multiple_files=True)\n", (15576, 15684), True, 'import streamlit as st\n'), ((15780, 15894), 'streamlit.file_uploader', 'st.file_uploader', (['"""`[Optional] Please upload eval set (.json):` """'], {'type': "['json']", 'accept_multiple_files': '(False)'}), "('`[Optional] Please upload eval set (.json):` ', type=[\n 'json'], accept_multiple_files=False)\n", (15796, 15894), True, 'import streamlit as st\n'), ((15989, 16026), 'streamlit.form_submit_button', 'st.form_submit_button', (['"""Submit files"""'], {}), "('Submit files')\n", (16010, 16026), True, 'import streamlit as st\n'), ((16933, 16958), 'pandas.DataFrame', 'pd.DataFrame', (['predictions'], {}), '(predictions)\n', (16945, 16958), True, 'import pandas as pd\n'), ((17524, 17553), 'streamlit.subheader', 'st.subheader', (['"""`Run Results`"""'], {}), "('`Run Results`')\n", 
(17536, 17553), True, 'import streamlit as st\n'), ((17558, 17814), 'streamlit.info', 'st.info', (['"""`I will grade the chain based on: 1/ the relevance of the retrived documents relative to the question and 2/ the summarized answer relative to the ground truth answer. You can see (and change) to prompts used for grading in text_utils`"""'], {}), "(\n '`I will grade the chain based on: 1/ the relevance of the retrived documents relative to the question and 2/ the summarized answer relative to the ground truth answer. You can see (and change) to prompts used for grading in text_utils`'\n )\n", (17565, 17814), True, 'import streamlit as st\n'), ((17840, 17886), 'streamlit.dataframe', 'st.dataframe', ([], {'data': 'd', 'use_container_width': '(True)'}), '(data=d, use_container_width=True)\n', (17852, 17886), True, 'import streamlit as st\n'), ((17917, 17952), 'streamlit.subheader', 'st.subheader', (['"""`Aggregate Results`"""'], {}), "('`Aggregate Results`')\n", (17929, 17952), True, 'import streamlit as st\n'), ((17957, 18326), 'streamlit.info', 'st.info', (['"""`Retrieval and answer scores are percentage of retrived documents deemed relevant by the LLM grader (relative to the question) and percentage of summarized answers deemed relevant (relative to ground truth answer), respectively. The size of point correponds to the latency (in seconds) of retrieval + answer summarization (larger circle = slower).`"""'], {}), "(\n '`Retrieval and answer scores are percentage of retrived documents deemed relevant by the LLM grader (relative to the question) and percentage of summarized answers deemed relevant (relative to ground truth answer), respectively. The size of point correponds to the latency (in seconds) of retrieval + answer summarization (larger circle = slower).`'\n )\n", (17964, 18326), True, 'import streamlit as st\n'), ((18373, 18693), 'pandas.DataFrame', 'pd.DataFrame', (["{'chunk_chars': [chunk_chars], 'overlap': [overlap], 'split': [split_method\n ], 'model': [model], 'retriever': [retriever_type], 'embedding': [\n embeddings], 'num_neighbors': [num_neighbors], 'Latency': [mean_latency\n ], 'Retrieval score': [percentage_docs], 'Answer score': [\n percentage_answer]}"], {}), "({'chunk_chars': [chunk_chars], 'overlap': [overlap], 'split':\n [split_method], 'model': [model], 'retriever': [retriever_type],\n 'embedding': [embeddings], 'num_neighbors': [num_neighbors], 'Latency':\n [mean_latency], 'Retrieval score': [percentage_docs], 'Answer score': [\n percentage_answer]})\n", (18385, 18693), True, 'import pandas as pd\n'), ((18943, 18991), 'pandas.concat', 'pd.concat', (['[summary, new_row]'], {'ignore_index': '(True)'}), '([summary, new_row], ignore_index=True)\n', (18952, 18991), True, 'import pandas as pd\n'), ((18996, 19048), 'streamlit.dataframe', 'st.dataframe', ([], {'data': 'summary', 'use_container_width': '(True)'}), '(data=summary, use_container_width=True)\n', (19008, 19048), True, 'import streamlit as st\n'), ((19848, 19911), 'streamlit.altair_chart', 'st.altair_chart', (['c'], {'use_container_width': '(True)', 'theme': '"""streamlit"""'}), "(c, use_container_width=True, theme='streamlit')\n", (19863, 19911), True, 'import streamlit as st\n'), ((3107, 3135), 'random.randint', 'random.randint', (['(0)', '(n - chunk)'], {}), '(0, n - chunk)\n', (3121, 3135), False, 'import random\n'), ((3272, 3297), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'temperature': '(0)'}), '(temperature=0)\n', (3282, 3297), False, 'from langchain.chat_models import ChatOpenAI\n'), 
((3555, 3594), 'itertools.chain.from_iterable', 'itertools.chain.from_iterable', (['eval_set'], {}), '(eval_set)\n', (3584, 3594), False, 'import itertools\n'), ((3991, 4067), 'langchain.text_splitter.RecursiveCharacterTextSplitter', 'RecursiveCharacterTextSplitter', ([], {'chunk_size': 'chunk_size', 'chunk_overlap': 'overlap'}), '(chunk_size=chunk_size, chunk_overlap=overlap)\n', (4021, 4067), False, 'from langchain.text_splitter import RecursiveCharacterTextSplitter, CharacterTextSplitter\n'), ((4966, 5017), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'model_name': 'model_version', 'temperature': '(0)'}), '(model_name=model_version, temperature=0)\n', (4976, 5017), False, 'from langchain.chat_models import ChatOpenAI\n'), ((5789, 5807), 'langchain.embeddings.openai.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {}), '()\n', (5805, 5807), False, 'from langchain.embeddings.openai import OpenAIEmbeddings\n'), ((7676, 7775), 'langchain.chains.RetrievalQA.from_chain_type', 'RetrievalQA.from_chain_type', (['llm'], {'chain_type': '"""stuff"""', 'retriever': 'retriever', 'input_key': '"""question"""'}), "(llm, chain_type='stuff', retriever=retriever,\n input_key='question')\n", (7703, 7775), False, 'from langchain.chains import RetrievalQA\n'), ((11850, 11861), 'time.time', 'time.time', ([], {}), '()\n', (11859, 11861), False, 'import time\n'), ((12337, 12348), 'time.time', 'time.time', ([], {}), '()\n', (12346, 12348), False, 'import time\n'), ((2064, 2096), 'os.path.splitext', 'os.path.splitext', (['file_path.name'], {}), '(file_path.name)\n', (2080, 2096), False, 'import os\n'), ((2162, 2188), 'pypdf.PdfReader', 'pypdf.PdfReader', (['file_path'], {}), '(file_path)\n', (2177, 2188), False, 'import pypdf\n'), ((2340, 2379), 'text_utils.clean_pdf_text', 'text_utils.clean_pdf_text', (['file_content'], {}), '(file_content)\n', (2365, 2379), False, 'import text_utils\n'), ((4197, 4284), 'langchain.text_splitter.CharacterTextSplitter', 'CharacterTextSplitter', ([], {'separator': '""" """', 'chunk_size': 'chunk_size', 'chunk_overlap': 'overlap'}), "(separator=' ', chunk_size=chunk_size, chunk_overlap=\n overlap)\n", (4218, 4284), False, 'from langchain.text_splitter import RecursiveCharacterTextSplitter, CharacterTextSplitter\n'), ((4390, 4491), 'streamlit.warning', 'st.warning', (['"""`Split method not recognized. Using RecursiveCharacterTextSplitter`"""'], {'icon': '"""⚠️"""'}), "(\n '`Split method not recognized. Using RecursiveCharacterTextSplitter`',\n icon='⚠️')\n", (4400, 4491), True, 'import streamlit as st\n'), ((4507, 4583), 'langchain.text_splitter.RecursiveCharacterTextSplitter', 'RecursiveCharacterTextSplitter', ([], {'chunk_size': 'chunk_size', 'chunk_overlap': 'overlap'}), '(chunk_size=chunk_size, chunk_overlap=overlap)\n', (4537, 4583), False, 'from langchain.text_splitter import RecursiveCharacterTextSplitter, CharacterTextSplitter\n'), ((5080, 5104), 'langchain.llms.Anthropic', 'Anthropic', ([], {'temperature': '(0)'}), '(temperature=0)\n', (5089, 5104), False, 'from langchain.llms import Anthropic\n'), ((5123, 5199), 'streamlit.warning', 'st.warning', (['"""`Model version not recognized. Using gpt-3.5-turbo`"""'], {'icon': '"""⚠️"""'}), "('`Model version not recognized. 
Using gpt-3.5-turbo`', icon='⚠️')\n", (5133, 5199), True, 'import streamlit as st\n'), ((5223, 5276), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'model_name': '"""gpt-3.5-turbo"""', 'temperature': '(0)'}), "(model_name='gpt-3.5-turbo', temperature=0)\n", (5233, 5276), False, 'from langchain.chat_models import ChatOpenAI\n'), ((5870, 5893), 'langchain.embeddings.HuggingFaceEmbeddings', 'HuggingFaceEmbeddings', ([], {}), '()\n', (5891, 5893), False, 'from langchain.embeddings import HuggingFaceEmbeddings\n'), ((5912, 5982), 'streamlit.warning', 'st.warning', (['"""`Embedding type not recognized. Using OpenAI`"""'], {'icon': '"""⚠️"""'}), "('`Embedding type not recognized. Using OpenAI`', icon='⚠️')\n", (5922, 5982), True, 'import streamlit as st\n'), ((6003, 6021), 'langchain.embeddings.openai.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {}), '()\n', (6019, 6021), False, 'from langchain.embeddings.openai import OpenAIEmbeddings\n'), ((6132, 6167), 'langchain.vectorstores.FAISS.from_texts', 'FAISS.from_texts', (['splits', 'embedding'], {}), '(splits, embedding)\n', (6148, 6167), False, 'from langchain.vectorstores import FAISS\n'), ((6549, 6591), 'langchain.retrievers.SVMRetriever.from_texts', 'SVMRetriever.from_texts', (['splits', 'embedding'], {}), '(splits, embedding)\n', (6572, 6591), False, 'from langchain.retrievers import SVMRetriever\n'), ((9018, 9071), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'model_name': '"""gpt-3.5-turbo"""', 'temperature': '(0)'}), "(model_name='gpt-3.5-turbo', temperature=0)\n", (9028, 9071), False, 'from langchain.chat_models import ChatOpenAI\n'), ((10222, 10275), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'model_name': '"""gpt-3.5-turbo"""', 'temperature': '(0)'}), "(model_name='gpt-3.5-turbo', temperature=0)\n", (10232, 10275), False, 'from langchain.chat_models import ChatOpenAI\n'), ((19641, 19660), 'altair.Size', 'alt.Size', (['"""Latency"""'], {}), "('Latency')\n", (19649, 19660), True, 'import altair as alt\n'), ((2632, 2683), 'streamlit.warning', 'st.warning', (['"""Please provide txt or pdf."""'], {'icon': '"""⚠️"""'}), "('Please provide txt or pdf.', icon='⚠️')\n", (2642, 2683), True, 'import streamlit as st\n'), ((6207, 6333), 'streamlit.warning', 'st.warning', (['"""`Error using OpenAI embeddings (disallowed TikToken token in the text). Using HuggingFace.`"""'], {'icon': '"""⚠️"""'}), "(\n '`Error using OpenAI embeddings (disallowed TikToken token in the text). 
Using HuggingFace.`'\n , icon='⚠️')\n", (6217, 6333), True, 'import streamlit as st\n'), ((6653, 6686), 'langchain.retrievers.TFIDFRetriever.from_texts', 'TFIDFRetriever.from_texts', (['splits'], {}), '(splits)\n', (6678, 6686), False, 'from langchain.retrievers import TFIDFRetriever\n'), ((6399, 6422), 'langchain.embeddings.HuggingFaceEmbeddings', 'HuggingFaceEmbeddings', ([], {}), '()\n', (6420, 6422), False, 'from langchain.embeddings import HuggingFaceEmbeddings\n'), ((6834, 6851), 'gpt_index.LLMPredictor', 'LLMPredictor', (['llm'], {}), '(llm)\n', (6846, 6851), False, 'from gpt_index import LLMPredictor, ServiceContext, GPTFaissIndex\n'), ((6870, 6949), 'gpt_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'chunk_size_limit': '(512)', 'llm_predictor': 'llm_predictor'}), '(chunk_size_limit=512, llm_predictor=llm_predictor)\n', (6898, 6949), False, 'from gpt_index import LLMPredictor, ServiceContext, GPTFaissIndex\n'), ((6989, 7009), 'faiss.IndexFlatL2', 'faiss.IndexFlatL2', (['d'], {}), '(d)\n', (7006, 7009), False, 'import faiss\n'), ((7034, 7127), 'gpt_index.GPTFaissIndex.from_documents', 'GPTFaissIndex.from_documents', (['documents'], {'faiss_index': 'faiss_index', 'service_context': 'context'}), '(documents, faiss_index=faiss_index,\n service_context=context)\n', (7062, 7127), False, 'from gpt_index import LLMPredictor, ServiceContext, GPTFaissIndex\n'), ((7142, 7209), 'streamlit.warning', 'st.warning', (['"""`Retriever type not recognized. Using SVM`"""'], {'icon': '"""⚠️"""'}), "('`Retriever type not recognized. Using SVM`', icon='⚠️')\n", (7152, 7209), True, 'import streamlit as st\n'), ((7234, 7276), 'langchain.retrievers.SVMRetriever.from_texts', 'SVMRetriever.from_texts', (['splits', 'embedding'], {}), '(splits, embedding)\n', (7257, 7276), False, 'from langchain.retrievers import SVMRetriever\n'), ((19470, 19485), 'altair.Chart', 'alt.Chart', (['show'], {}), '(show)\n', (19479, 19485), True, 'import altair as alt\n'), ((6762, 6791), 'llama_index.LangchainEmbedding', 'LangchainEmbedding', (['embedding'], {}), '(embedding)\n', (6780, 6791), False, 'from llama_index import LangchainEmbedding\n')] |
import numpy as np
import json
from fastapi import HTTPException, FastAPI, websockets
import json # for JSON parsing and packing
# for easy async code
# for absolute project paths
import os
# astro chat engines
from chat_objects import *
from chat_engine import ChatEngine
from vedastro import * # install via pip
import time # for performance measurements
# make sure KEY has been supplied
# example use: api_key = os.environ["ANYSCALE_API_KEY"]
# load API keys from .env file
import os
if "ANYSCALE_API_KEY" not in os.environ:
raise RuntimeError("KEY MISSING DINGUS")
FAISS_INDEX_PATH = "faiss_index"
# embedded vector instances are stored here for speed, shared between all calls
loaded_vectors = {}
chat_engines = {}
preset_queries = {}
embeddings_creator = {}
# init app to handle HTTP requests
app = FastAPI(title="Chat API")
# log level configuration (make server output more fun to watch 😁 📺)
# logging.basicConfig(stream=sys.stdout, level=logging.DEBUG, force=True)
# ..𝕗𝕠𝕣 𝕚𝕗 𝕪𝕠𝕦 𝕒𝕣𝕖 the 𝕠𝕗 𝕨𝕠𝕣𝕤𝕥 the worst
# 𝕒𝕟𝕕 𝕪𝕠𝕦 𝕝𝕠𝕧𝕖 𝔾𝕠𝕕, 𝕪𝕠𝕦❜𝕣𝕖 𝕗𝕣𝕖𝕖❕
# Yogananda
# prepares server, run once on startup
def initialize_chat_api():
global chat_engines # set cache
global loaded_vectors # set cache
global preset_queries # set cache
global embeddings_creator # set cache
print("T-minus countdown")
print("Go/No-Go Poll")
variation_name = "MK7"
print("Loaded Vectors go!")
####################################
# prepare the LLM that will answer the query
# select the correct engine variation
wrapper = ChatEngine("MK7")
    # load the model shards (heavy compute)
chat_engines["MK7"] = wrapper.create_instance()
print("Chat Model LLMs go!")
####################################
# STEP 1 : Prepare Query Map
# this will be used to find preset user query intentions, with standard answers
# preset_queries, embeddings_creator = ChatTools.get_parsed_query_map()
# print("Query Mapper go!")
####################################
print("All systems are go")
print("Astronauts, start your engines")
print("Main engine start")
print("Ignition sequence start")
print("All engines running")
print("We have lift off!")
# brings together the answering mechanism
# marked for OBLIVION
async def answer_to_reply(chat_input):
print("################ START: ANSWER_TO_REPLY ################")
global chat_engines
global preset_queries
global embeddings_creator
# TODO
# do query mapping if possible
# if is_query:
# auto_reply = ChatTools.map_query_by_similarity(
# payload.query, payload.llm_model_name, preset_queries, embeddings_creator)
    # time to call in the big guns...GPU infantry firing LLM "chat/completion" shells!
# STEP 2: COMBINE CONTEXT AND QUESTION AND SEND TO CHAT LLM
# Query the chat engine and send the results to the client
llm_response = chat_engines["MK7"].query(
text=chat_input["text"],
topic=chat_input["topic"], # birth time/book name
# Controls the trade-off between randomness and determinism in the response
# A high value (e.g., 1.0) makes the model more random and creative
# temperature=chat_input["temperature"],
# # Controls diversity of the response
# # A high value (e.g., 0.9) allows for more diversity
# top_p=chat_input["top_p"],
# # Limits the maximum length of the generated text
# max_tokens=chat_input["max_tokens"],
# # Specifies sequences that tell the model when to stop generating text
# stop=chat_input["stop"],
# # Returns debug data like usage statistics
# return_debug_data=False # Set to True to see detailed information about model usage
)
# note: metadata and source nodes used is all here
# but we only take AI text response
text_response = llm_response.response
return text_response
@app.get("/")
def home():
return {"Welcome to ChatAPI : open-source chat AI for vedic astrology"}
# SEARCH
@app.post('/HoroscopeLLMSearch')
async def horoscope_llmsearch(payload: SearchPayload):
try:
global loaded_vectors
# lazy load for speed
        # use file path as id for dynamic LLM model support
savePathPrefix = "horoscope"
        # use model name for multiple model support
filePath = f"{FAISS_INDEX_PATH}/{savePathPrefix}/{payload.llm_model_name}"
if loaded_vectors.get(filePath) is None:
# load the horoscope vectors (heavy compute)
loaded_vectors[filePath] = EmbedVectors(filePath, payload.llm_model_name)
# # get all predictions for given birth time (aka filter)
# run calculator to get list of prediction names for given birth time
birthTime = payload.get_birth_time()
calcResult = Calculate.HoroscopePredictionNames(birthTime)
# format list nicely so LLM can swallow
birthPredictions = {"name": [item for item in calcResult]}
# do LLM search on found predictions
results_formated = loaded_vectors[filePath].search(payload.query, payload.search_type, birthPredictions)
return results_formated
# if fail, fall gracefully my dear
except Exception as e:
raise HTTPException(status_code=400, detail=str(e))
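# Note: the exact SearchPayload schema is defined in chat_objects (not shown here);
# from the attribute accesses above it must carry at least `query`, `search_type`,
# `llm_model_name` and the birth-time fields consumed by payload.get_birth_time().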
@app.websocket("/HoroscopeChat")
async def horoscope_chat(websocket: websockets.WebSocket):
global chat_engines # use cache
global loaded_vectors # use cache
await websocket.accept()
ai_reply = "👋 Welcome, how may i help?"
await websocket.send_text(ChatTools.package_reply_for_shippment(command=np.array(["no_feedback"]), text_html=ai_reply, text=ai_reply, text_hash=ChatTools.random_id()))
# connection has been made, now keep connection alive in infinite loop,
# with fail safe to catch it midway
try:
session_id = ChatTools.random_id(23)
message_count = 0
# BEATING HEART 💓
# this is the loop that keeps chat running at the highest level
while True: # beat forever....my sweet love
# STAGE 1:
# Receive a message from the client
# control is held here while waiting for user input
client_input_raw = await websocket.receive_text()
# 1.1 : prepare needed data
input_parsed = json.loads(client_input_raw)
# command : can come from client as well (exp:follow-up question)
if "command" in input_parsed and not hasattr(input_parsed["command"], 'tolist'): # convert to numpy
input_parsed["command"] = np.array(input_parsed["command"])
if "command" not in input_parsed:
# make new command
input_parsed["command"] = np.array([""])
# text
if "text" not in input_parsed:
input_parsed["text"] = "" # easy detect if empty
# topic
if "topic" not in input_parsed:
input_parsed["topic"] = "" # easy detect if empty
# session id
if "session_id" not in input_parsed:
# overwrite only if not specified
input_parsed["session_id"] = session_id
# rating
if "rating" not in input_parsed:
input_parsed["rating"] = 0 # overwrite only if not specified
# text_hash : for user to give point to a message and rate it
if "text_hash" not in input_parsed:
# overwrite only if not specified
input_parsed["text_hash"] = ""
# user_id : internet fungus filter
if "user_id" not in input_parsed or input_parsed["user_id"] == "101":
input_parsed["user_id"] = "" # overwrite only if not specified
# sender
if "sender" not in input_parsed:
input_parsed["sender"] = "Human" # incoming is always Human
# STAGE 2 : THINK
# standard 1st answer
ai_html_reply = ai_reply = "Thinking....🤔"
# 2.1 : user not logged in (could be a Machine!) beware internet fungus! (END HERE)
if input_parsed["user_id"] == "":
# add special command recognized by VedAstro.js to show handle login nicely
input_parsed["command"] = np.append(input_parsed["command"], "please_login")
ai_reply = """Please login sir...to verify you are not a robot 🤖\nEasy & secure login with Google or Facebook\n\n.....I understand this is annoying, but I have no choice!🤗\nthere are robots in the internet who target smart AI Chat agents like me.\nPlease login to start talking about astrology...💬
"""
ai_html_reply = """
Please login sir...to verify you are not a robot 🤖<br>
Easy & secure login with <a target="_blank" style="text-decoration-line: none;" href="https://vedastro.org/Account/Login/RememberMe" class="link-primary fw-bold">Google</a> or <a target="_blank" style="text-decoration-line: none;" href="https://vedastro.org/Account/Login/RememberMe" class="link-primary fw-bold">Facebook</a><br><br>
.....I understand this is annoying, but I have no choice!🤗<br>
there are robots in the internet who target smart AI Chat agents like me.<br>
So please login to get started...<br>
"""
# 2.2 : rating AND text_hash specified --> user is giving rating vote NOT QUERY (END HERE)
if input_parsed["rating"] != 0 and input_parsed["text_hash"] != "":
# memorize Human's rating
AzureTableManager.rate_message(input_parsed["session_id"], input_parsed["text_hash"], input_parsed["rating"])
                # increase contribution score
contribution_score = AzureTableManager.increase_contribution_score(input_parsed["user_id"], 1)
# say thanks 🙏
                # NOTE: DO NOT tell the user explicitly to give more feedback
                # psychology 101: give them the sincere motivation to help instead -> better quality/quantity
                # if we tell the user explicitly, we increase the probability of deterministic failure, pushing the user into 1 of 2 camps
ai_reply = f"""Congratulation!🫡\n You have just helped improve astrology worldwide🌍\n I have now memorized your feedback,🧠\n now on all my answer will take your feedback into consideration.\n Thank you so much for the rating🙏\n"""
ai_html_reply = f"""
<h5>🥇 AI Contributor Score : <b>{contribution_score*10}</b></h5>
                Congratulations!🫡<br>
You have just helped improve astrology worldwide🌍<br><br>
I have now <b>memorized your feedback</b>,🧠<br>
                from now on all my answers will take your feedback into consideration.<br><br>
Thank you so much for the rating🙏<br><br>
"""
# no feedback needed here
input_parsed["command"] = np.append(input_parsed["command"], "no_feedback")
            # update caller with intermediate message
await websocket.send_text(ChatTools.package_reply_for_shippment(command=input_parsed["command"], text_html=ai_html_reply, text=ai_reply, text_hash=ChatTools.random_id()))
# get start feedback beyond this point
# this will START showing give feedback buttons 🗣️🙏
index = np.where(input_parsed["command"] == "no_feedback")
input_parsed["command"] = np.delete(input_parsed["command"], index)
            #             +-> FOLLOW-UP --> specialized lite LLM call
            #             |
            # QUESTION ---+
            #             |
            #             +-> UNRELATED --> full llama RAG synthesis
# mark as follow up only if a follow up question is present
is_followup = "followup_question" in input_parsed["command"]
all_checks_pass = ai_reply == "" or ai_reply == "Thinking....🤔"
# use later for highlight (UX improve)
user_question = input_parsed["text"] # user's question
# message_number : needed for quick revisit answer lookup
message_count += 1
input_parsed["message_number"] = message_count
# SAVE QUESTION
            # format & log message for intelligent past QA (id out is for reference)
# chat_raw_input : text, session_id, rating, message_number
human_question_hash = AzureTableManager.save_message_in_azure(input_parsed)
# UNRELATED
if not is_followup and all_checks_pass: # only call LLM if all checks and balances are even
#FOLLOW UP
print("################ DETECTED: LLM UNRELATED REPLY ################")
# answer machine (send all needed data)
raw_result = chat_engines["MK7"].query(user_question=input_parsed["text"], topic_text=input_parsed["topic"])
ai_reply = raw_result.response
# STAGE 3 : REPLY
                # log message for intelligent past QA
# use later for highlight (UX improve)
input_parsed["text"] = ai_reply
input_parsed["sender"] = "AI"
message_count += 1
input_parsed["message_number"] = message_count
ai_reply_hash = AzureTableManager.save_message_in_azure(input_parsed)
# send ans to caller in JSON form, to support metadata
# highlight text with relevant keywords relating to input query
html_str_llm = ChatTools.highlight_relevant_keywords_llm(question_text=user_question, answer_text=ai_reply)
followup_questions = ChatTools.generate_followup_questions_llm(keywords_text=user_question, main_text=ai_reply)
packed_box = ChatTools.package_reply_for_shippment(command=input_parsed["command"], text_html=html_str_llm, text=ai_reply, text_hash=ai_reply_hash, followup_questions=followup_questions)
await websocket.send_text(packed_box)
# FOLLOW-UP
if is_followup and all_checks_pass:
print("################ DETECTED: FOLLOW UP QUESTION ################")
#prepare needed data
primary_answer_hash = input_parsed["primary_answer_hash"] # base question to ask against
#based on hash get full question as pure text
primary_answer_data = AzureTableManager.read_from_table(session_id, primary_answer_hash)
primary_answer = primary_answer_data["text"]
#based on primary answer, back track to primary question
primary_question_msg_number = int(primary_answer_data["message_number"]) - 1 # go up 1 step
primary_question_data = AzureTableManager.read_from_table_message_number(session_id=session_id, message_number=primary_question_msg_number)
primary_question = primary_question_data["text"]
followup_question = input_parsed["text"] # single question sent by client
                # NOTE: SPECIAL DOCS RETRIEVAL
                # create a new synthetic query, composing all possible data
                # to fetch more relevant context than the first time the question was asked
                # NOTE: this is not for the LLM, only to fetch vectors, so keep it clean and targeted to the doc similarity score
synthetic_user_question = f"{primary_question}"
horoscope_predictions = chat_engines["MK7"].vector_index_search(topic = input_parsed["topic"], user_question=synthetic_user_question)
# get reply from LLM 🚅
ai_reply = ChatTools.answer_followup_questions_llm(primary_question=primary_question, primary_answer=primary_answer, horoscope_predictions=horoscope_predictions, followup_question=followup_question)
# SAVE AI REPLY
                # log message for intelligent past QA
input_parsed["text"] = ai_reply
input_parsed["sender"] = "AI"
message_count += 1
input_parsed["message_number"] = message_count
ai_reply_hash = AzureTableManager.save_message_in_azure(input_parsed)
ai_reply_html = ChatTools.highlight_relevant_keywords_llm(question_text=user_question, answer_text=ai_reply)
followup_questions = ChatTools.generate_followup_questions_llm(keywords_text=user_question, main_text=ai_reply)
packed_box = ChatTools.package_reply_for_shippment(text_html=ai_reply_html, text=ai_reply, text_hash=ai_reply_hash, followup_questions=followup_questions)
await websocket.send_text(packed_box)
# end of line no conditions met
print("END OF LINE!")
    # Handle failures gracefully
except Exception as e:
print(e)
# REGENERATE HOROSCOPE EMBEDINGS
# takes all horoscope predictions text and converts them into LLM embedding vectors
# which will be used later to run queries for search & AI chat
@app.post('/HoroscopeRegenerateEmbeddings')
async def horoscope_regenerate_embeddings(payload: RegenPayload):
from llama_index.core import Document, VectorStoreIndex
from llama_index.core import Settings
ChatTools.password_protect(payload.password) # password is Spotty
from langchain_core.documents import Document
import chromadb
from llama_index.core import VectorStoreIndex, SimpleDirectoryReader
from llama_index.vector_stores.chroma import ChromaVectorStore
from llama_index.core import StorageContext
# 1 : get all horoscope texts direct from VedAstro library
horoscopeDataList = HoroscopeDataListStatic.Rows
# repackage all horoscope data so LLM can understand (docs)
# format list nicely so LLM can swallow (llama_index nodes)
# so that llama index can understand vedastro predictions
# all_predictions_json = json.loads(HoroscopePrediction.ToJsonList(horoscopeDataList).ToString())
prediction_nodes = ChatTools.vedastro_predictions_to_llama_index_documents(horoscopeDataList)
# build index
index = VectorStoreIndex.from_documents(prediction_nodes, show_progress=True)
filePath = """vector_store/horoscope_data"""
index.storage_context.persist(persist_dir=filePath)
# todo commit to GitHub repo
# tell call all went well
return {"Status": "Pass", "Payload": f"Amen ✝️ complete, it took {11} min"}
# NOTE: below is another method for generating vectors, used up till MK3
# benefit is that it runs on CPU, via FAISS
@app.post('/HoroscopeRegenerateEmbeddingsLegacy')
async def horoscope_regenerate_embeddingsLegacy(payload: RegenPayload):
ChatTools.password_protect(payload.password) # password is Spotty
    # import LlamaIndex
from llama_index.core import VectorStoreIndex, SummaryIndex, SimpleDirectoryReader
    # build the index from the training data in the data folder
documents = SimpleDirectoryReader('data').load_data()
VSindex = VectorStoreIndex.from_documents(documents, show_progress=True)
Sindex = SummaryIndex.from_documents(documents, show_progress=True)
    # run the queries
VSquery_engine = VSindex.as_query_engine()
Squery_engine = Sindex.as_query_engine()
# tell call all went well
return {"Status": "Pass", "Payload": f"Amen ✝️ complete, it took {11} min"}
# SUMMARISE PREDICTION TEXT
# JSON summarise data
@app.post('/SummarizePrediction')
async def summarize_prediction(payload: SummaryPayload):
ChatTools.password_protect(payload.password) # password is Spotty
from typing import List
import openai
from pydantic import BaseModel, Field
from enum import Enum
client = openai.OpenAI(base_url="https://api.endpoints.anyscale.com/v1", api_key=os.environ["ANYSCALE_API_KEY"])
class SpecializedSummary(BaseModel):
"""The format of the answer."""
Body: str = Field(description="related to physical body, health")
Mind: str = Field(description="related to state of mind, emotional state")
Family: str = Field(description="related to friends, family, people around us")
Romance: str = Field(description="related to romantic relationships, love, marriage")
Finance: str = Field(description="related to finances, money, income, and wealth")
Education: str = Field(description="related to studies, learning, education, academic pursuits, knowledge acquisition")
chat_completion = client.chat.completions.create(
model="mistralai/Mistral-7B-Instruct-v0.1", # check for more model
response_format={
"type": "json_object",
"schema": SpecializedSummary.model_json_schema()
},
messages=[{
"role": "system",
"content": f"Output JSON. Only use context. \n CONTEXT:{{{payload.input_text}}} "
}, {
"role": "user",
"content": payload.instruction_text
}],
temperature=payload.temperature)
return json.loads(chat_completion.choices[0].message.content)
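# Illustrative sketch (hypothetical values): with the SpecializedSummary schema above,
# the endpoint is expected to return JSON shaped roughly like
# {"Body": "...", "Mind": "...", "Family": "...",
#  "Romance": "...", "Finance": "...", "Education": "..."}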
# blind sighted on a monday afternoon in 2024
# brought my hand close to my nose only to smell the past
# winter in 2015 of a metal burnt with solid flower essence
#
# my beloved now stood then by me too,
# it is through her i see the past,
# and smile 😊
@app.post("/PresetQueryMatch")
async def preset_query_match(payload: TempPayload):
global preset_queries
global embeddings_creator
ChatTools.password_protect(payload.password) # password is Spotty
auto_reply = ChatTools.map_query_by_similarity(payload.query, payload.llm_model_name, preset_queries, embeddings_creator)
return auto_reply
# SERVER STUFF
# do init
initialize_chat_api()
| [
"llama_index.core.VectorStoreIndex.from_documents",
"llama_index.core.SimpleDirectoryReader",
"llama_index.core.SummaryIndex.from_documents"
] | [((816, 841), 'fastapi.FastAPI', 'FastAPI', ([], {'title': '"""Chat API"""'}), "(title='Chat API')\n", (823, 841), False, 'from fastapi import HTTPException, FastAPI, websockets\n'), ((1553, 1570), 'chat_engine.ChatEngine', 'ChatEngine', (['"""MK7"""'], {}), "('MK7')\n", (1563, 1570), False, 'from chat_engine import ChatEngine\n'), ((18353, 18422), 'llama_index.core.VectorStoreIndex.from_documents', 'VectorStoreIndex.from_documents', (['prediction_nodes'], {'show_progress': '(True)'}), '(prediction_nodes, show_progress=True)\n', (18384, 18422), False, 'from llama_index.core import VectorStoreIndex, SummaryIndex, SimpleDirectoryReader\n'), ((19204, 19266), 'llama_index.core.VectorStoreIndex.from_documents', 'VectorStoreIndex.from_documents', (['documents'], {'show_progress': '(True)'}), '(documents, show_progress=True)\n', (19235, 19266), False, 'from llama_index.core import VectorStoreIndex, SummaryIndex, SimpleDirectoryReader\n'), ((19280, 19338), 'llama_index.core.SummaryIndex.from_documents', 'SummaryIndex.from_documents', (['documents'], {'show_progress': '(True)'}), '(documents, show_progress=True)\n', (19307, 19338), False, 'from llama_index.core import VectorStoreIndex, SummaryIndex, SimpleDirectoryReader\n'), ((19899, 20007), 'openai.OpenAI', 'openai.OpenAI', ([], {'base_url': '"""https://api.endpoints.anyscale.com/v1"""', 'api_key': "os.environ['ANYSCALE_API_KEY']"}), "(base_url='https://api.endpoints.anyscale.com/v1', api_key=os.\n environ['ANYSCALE_API_KEY'])\n", (19912, 20007), False, 'import openai\n'), ((21205, 21259), 'json.loads', 'json.loads', (['chat_completion.choices[0].message.content'], {}), '(chat_completion.choices[0].message.content)\n', (21215, 21259), False, 'import json\n'), ((20105, 20158), 'pydantic.Field', 'Field', ([], {'description': '"""related to physical body, health"""'}), "(description='related to physical body, health')\n", (20110, 20158), False, 'from pydantic import BaseModel, Field\n'), ((20179, 20241), 'pydantic.Field', 'Field', ([], {'description': '"""related to state of mind, emotional state"""'}), "(description='related to state of mind, emotional state')\n", (20184, 20241), False, 'from pydantic import BaseModel, Field\n'), ((20264, 20329), 'pydantic.Field', 'Field', ([], {'description': '"""related to friends, family, people around us"""'}), "(description='related to friends, family, people around us')\n", (20269, 20329), False, 'from pydantic import BaseModel, Field\n'), ((20353, 20423), 'pydantic.Field', 'Field', ([], {'description': '"""related to romantic relationships, love, marriage"""'}), "(description='related to romantic relationships, love, marriage')\n", (20358, 20423), False, 'from pydantic import BaseModel, Field\n'), ((20447, 20514), 'pydantic.Field', 'Field', ([], {'description': '"""related to finances, money, income, and wealth"""'}), "(description='related to finances, money, income, and wealth')\n", (20452, 20514), False, 'from pydantic import BaseModel, Field\n'), ((20540, 20652), 'pydantic.Field', 'Field', ([], {'description': '"""related to studies, learning, education, academic pursuits, knowledge acquisition"""'}), "(description=\n 'related to studies, learning, education, academic pursuits, knowledge acquisition'\n )\n", (20545, 20652), False, 'from pydantic import BaseModel, Field\n'), ((6369, 6397), 'json.loads', 'json.loads', (['client_input_raw'], {}), '(client_input_raw)\n', (6379, 6397), False, 'import json\n'), ((11135, 11184), 'numpy.append', 'np.append', (["input_parsed['command']", '"""no_feedback"""'], 
{}), "(input_parsed['command'], 'no_feedback')\n", (11144, 11184), True, 'import numpy as np\n'), ((11557, 11607), 'numpy.where', 'np.where', (["(input_parsed['command'] == 'no_feedback')"], {}), "(input_parsed['command'] == 'no_feedback')\n", (11565, 11607), True, 'import numpy as np\n'), ((11646, 11687), 'numpy.delete', 'np.delete', (["input_parsed['command']", 'index'], {}), "(input_parsed['command'], index)\n", (11655, 11687), True, 'import numpy as np\n'), ((19148, 19177), 'llama_index.core.SimpleDirectoryReader', 'SimpleDirectoryReader', (['"""data"""'], {}), "('data')\n", (19169, 19177), False, 'from llama_index.core import VectorStoreIndex, SummaryIndex, SimpleDirectoryReader\n'), ((6632, 6665), 'numpy.array', 'np.array', (["input_parsed['command']"], {}), "(input_parsed['command'])\n", (6640, 6665), True, 'import numpy as np\n'), ((6789, 6803), 'numpy.array', 'np.array', (["['']"], {}), "([''])\n", (6797, 6803), True, 'import numpy as np\n'), ((8354, 8404), 'numpy.append', 'np.append', (["input_parsed['command']", '"""please_login"""'], {}), "(input_parsed['command'], 'please_login')\n", (8363, 8404), True, 'import numpy as np\n'), ((5659, 5684), 'numpy.array', 'np.array', (["['no_feedback']"], {}), "(['no_feedback'])\n", (5667, 5684), True, 'import numpy as np\n')] |
import os
import pygments
from prompt_toolkit import print_formatted_text
from prompt_toolkit.formatted_text import PygmentsTokens
from pygments_markdown_lexer import MarkdownLexer
from typing import Any, Optional
from langchain import ConversationChain, OpenAI
from langchain.chat_models import ChatOpenAI
from langchain.chains.conversation.memory import ConversationBufferMemory
from llama_index import LLMPredictor, ServiceContext, StorageContext, VectorStoreIndex, SimpleDirectoryReader, load_index_from_storage
from gpttrace.config import cfg
def get_doc_content_for_query(index: VectorStoreIndex, query: str) -> Optional[str]:
"""
Find the content from the document that is closest to the user's request
:param index: Vector database
:param query: User's request
    :return: The content that is most relevant to the user's request, or None if no related content is found.
"""
query_engine = index.as_query_engine()
response = query_engine.query(query)
related_contents = response.source_nodes
if related_contents is not None:
contents = "\nThere are some related information about this query:\n"
for i, content in enumerate(related_contents):
info = f"Info {i}: {content.node.get_text()}\n"
contents += info
return contents
else:
return None
def pretty_print(input_info: str, lexer: Any = MarkdownLexer, *args: Any, **kwargs: Any) -> None:
"""
This function takes an input string and a lexer (default is MarkdownLexer),
lexes the input using the provided lexer, and then pretty prints the lexed tokens.
    :param input_info: The string to be lexed and pretty printed.
:param lexer: The lexer to use for lexing the input. Defaults to MarkdownLexer.
:param args: Additional arguments to be passed to the print_formatted_text function.
:param kwargs: Additional keyword arguments to be passed to the print_formatted_text function.
"""
tokens = list(pygments.lex(input_info, lexer=lexer()))
print_formatted_text(PygmentsTokens(tokens), *args, **kwargs)
def init_conversation(need_train: bool, verbose: bool) -> tuple[ConversationChain, VectorStoreIndex]:
"""
Initialize the conversation and vector database.
:param need_train: Whether you need to use a vector database.
    :param verbose: Whether to print extra information.
    :return: A tuple of two elements: the ConversationChain holding the conversation between the human and the AI, and the VectorStoreIndex vector database (None when need_train is False).
"""
model_name = cfg.get("DEFAULT_MODEL")
llm = ChatOpenAI(model_name=model_name, temperature=0)
agent_chain = ConversationChain(llm=llm, verbose=verbose,
memory=ConversationBufferMemory())
if need_train:
vector_path = cfg.get("VECTOR_DATABASE_PATH")
if not os.path.exists(vector_path):
print(f"{vector_path} not found. Training...")
md_files = []
# Get all markdown files in the tutorial
for root, _, files in os.walk(cfg.get("DOC_PATH")):
for file in files:
if file.endswith('.md'):
md_files.append(os.path.join(root, file))
print(f":: {cfg.get('DOC_PATH')}, {md_files}")
documents = SimpleDirectoryReader(input_files=md_files).load_data()
llm_predictor = LLMPredictor(llm=OpenAI(
temperature=0, model_name="text-davinci-003"))
service_context = ServiceContext.from_defaults(
llm_predictor=llm_predictor)
index = VectorStoreIndex.from_documents(
documents, service_context=service_context)
index.storage_context.persist(vector_path)
print(
f"Training completed, {vector_path} has been saved.")
else:
print(f"Loading the {vector_path}...")
storage_context = StorageContext.from_defaults(
persist_dir=vector_path)
index = load_index_from_storage(storage_context)
else:
index = None
return agent_chain, index
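# Hypothetical usage sketch (illustrative only; assumes a valid OpenAI key and
# gpttrace config). It shows how the helpers above are meant to fit together:
#
#   agent_chain, index = init_conversation(need_train=True, verbose=False)
#   question = "How do I attach a kprobe with eBPF?"
#   context = get_doc_content_for_query(index, question) if index else ""
#   answer = agent_chain.predict(input=question + (context or ""))
#   pretty_print(answer)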
| [
"llama_index.VectorStoreIndex.from_documents",
"llama_index.SimpleDirectoryReader",
"llama_index.ServiceContext.from_defaults",
"llama_index.StorageContext.from_defaults",
"llama_index.load_index_from_storage"
] | [((2499, 2523), 'gpttrace.config.cfg.get', 'cfg.get', (['"""DEFAULT_MODEL"""'], {}), "('DEFAULT_MODEL')\n", (2506, 2523), False, 'from gpttrace.config import cfg\n'), ((2534, 2582), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'model_name': 'model_name', 'temperature': '(0)'}), '(model_name=model_name, temperature=0)\n', (2544, 2582), False, 'from langchain.chat_models import ChatOpenAI\n'), ((1993, 2015), 'prompt_toolkit.formatted_text.PygmentsTokens', 'PygmentsTokens', (['tokens'], {}), '(tokens)\n', (2007, 2015), False, 'from prompt_toolkit.formatted_text import PygmentsTokens\n'), ((2757, 2788), 'gpttrace.config.cfg.get', 'cfg.get', (['"""VECTOR_DATABASE_PATH"""'], {}), "('VECTOR_DATABASE_PATH')\n", (2764, 2788), False, 'from gpttrace.config import cfg\n'), ((2688, 2714), 'langchain.chains.conversation.memory.ConversationBufferMemory', 'ConversationBufferMemory', ([], {}), '()\n', (2712, 2714), False, 'from langchain.chains.conversation.memory import ConversationBufferMemory\n'), ((2804, 2831), 'os.path.exists', 'os.path.exists', (['vector_path'], {}), '(vector_path)\n', (2818, 2831), False, 'import os\n'), ((3466, 3523), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'llm_predictor': 'llm_predictor'}), '(llm_predictor=llm_predictor)\n', (3494, 3523), False, 'from llama_index import LLMPredictor, ServiceContext, StorageContext, VectorStoreIndex, SimpleDirectoryReader, load_index_from_storage\n'), ((3561, 3636), 'llama_index.VectorStoreIndex.from_documents', 'VectorStoreIndex.from_documents', (['documents'], {'service_context': 'service_context'}), '(documents, service_context=service_context)\n', (3592, 3636), False, 'from llama_index import LLMPredictor, ServiceContext, StorageContext, VectorStoreIndex, SimpleDirectoryReader, load_index_from_storage\n'), ((3893, 3946), 'llama_index.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {'persist_dir': 'vector_path'}), '(persist_dir=vector_path)\n', (3921, 3946), False, 'from llama_index import LLMPredictor, ServiceContext, StorageContext, VectorStoreIndex, SimpleDirectoryReader, load_index_from_storage\n'), ((3984, 4024), 'llama_index.load_index_from_storage', 'load_index_from_storage', (['storage_context'], {}), '(storage_context)\n', (4007, 4024), False, 'from llama_index import LLMPredictor, ServiceContext, StorageContext, VectorStoreIndex, SimpleDirectoryReader, load_index_from_storage\n'), ((3013, 3032), 'gpttrace.config.cfg.get', 'cfg.get', (['"""DOC_PATH"""'], {}), "('DOC_PATH')\n", (3020, 3032), False, 'from gpttrace.config import cfg\n'), ((3264, 3307), 'llama_index.SimpleDirectoryReader', 'SimpleDirectoryReader', ([], {'input_files': 'md_files'}), '(input_files=md_files)\n', (3285, 3307), False, 'from llama_index import LLMPredictor, ServiceContext, StorageContext, VectorStoreIndex, SimpleDirectoryReader, load_index_from_storage\n'), ((3365, 3417), 'langchain.OpenAI', 'OpenAI', ([], {'temperature': '(0)', 'model_name': '"""text-davinci-003"""'}), "(temperature=0, model_name='text-davinci-003')\n", (3371, 3417), False, 'from langchain import ConversationChain, OpenAI\n'), ((3205, 3224), 'gpttrace.config.cfg.get', 'cfg.get', (['"""DOC_PATH"""'], {}), "('DOC_PATH')\n", (3212, 3224), False, 'from gpttrace.config import cfg\n'), ((3155, 3179), 'os.path.join', 'os.path.join', (['root', 'file'], {}), '(root, file)\n', (3167, 3179), False, 'import os\n')] |
from typing import List
import streamlit as st
import os, shutil
from llama_index import VectorStoreIndex, ServiceContext, Document
from llama_index.llms import OpenAI
from llama_index import SimpleDirectoryReader
def load_data():
reader = SimpleDirectoryReader(input_dir="src/data", recursive=True)
docs = reader.load_data()
return docs
def RAG(_config, _docs):
service_context = ServiceContext.from_defaults(
llm=OpenAI(
model=_config.gpt_model,
temperature=_config.temperature,
max_tokens=_config.max_tokens,
system_prompt=_config.llm_system_role,
),
chunk_size=_config.chunk_size,
)
index = VectorStoreIndex.from_documents(_docs, service_context=service_context)
return index
def delete_data():
print("Cleaning the data folder")
folder = "src/data"
for filename in os.listdir(folder):
if filename != ".gitignore":
file_path = os.path.join(folder, filename)
try:
if os.path.isfile(file_path) or os.path.islink(file_path):
os.unlink(file_path)
elif os.path.isdir(file_path):
shutil.rmtree(file_path)
except Exception as e:
print("Failed to delete %s. Reason: %s" % (file_path, e))
| [
"llama_index.VectorStoreIndex.from_documents",
"llama_index.SimpleDirectoryReader",
"llama_index.llms.OpenAI"
] | [((246, 305), 'llama_index.SimpleDirectoryReader', 'SimpleDirectoryReader', ([], {'input_dir': '"""src/data"""', 'recursive': '(True)'}), "(input_dir='src/data', recursive=True)\n", (267, 305), False, 'from llama_index import SimpleDirectoryReader\n'), ((695, 766), 'llama_index.VectorStoreIndex.from_documents', 'VectorStoreIndex.from_documents', (['_docs'], {'service_context': 'service_context'}), '(_docs, service_context=service_context)\n', (726, 766), False, 'from llama_index import VectorStoreIndex, ServiceContext, Document\n'), ((887, 905), 'os.listdir', 'os.listdir', (['folder'], {}), '(folder)\n', (897, 905), False, 'import os, shutil\n'), ((443, 582), 'llama_index.llms.OpenAI', 'OpenAI', ([], {'model': '_config.gpt_model', 'temperature': '_config.temperature', 'max_tokens': '_config.max_tokens', 'system_prompt': '_config.llm_system_role'}), '(model=_config.gpt_model, temperature=_config.temperature, max_tokens\n =_config.max_tokens, system_prompt=_config.llm_system_role)\n', (449, 582), False, 'from llama_index.llms import OpenAI\n'), ((968, 998), 'os.path.join', 'os.path.join', (['folder', 'filename'], {}), '(folder, filename)\n', (980, 998), False, 'import os, shutil\n'), ((1035, 1060), 'os.path.isfile', 'os.path.isfile', (['file_path'], {}), '(file_path)\n', (1049, 1060), False, 'import os, shutil\n'), ((1064, 1089), 'os.path.islink', 'os.path.islink', (['file_path'], {}), '(file_path)\n', (1078, 1089), False, 'import os, shutil\n'), ((1111, 1131), 'os.unlink', 'os.unlink', (['file_path'], {}), '(file_path)\n', (1120, 1131), False, 'import os, shutil\n'), ((1153, 1177), 'os.path.isdir', 'os.path.isdir', (['file_path'], {}), '(file_path)\n', (1166, 1177), False, 'import os, shutil\n'), ((1199, 1223), 'shutil.rmtree', 'shutil.rmtree', (['file_path'], {}), '(file_path)\n', (1212, 1223), False, 'import os, shutil\n')] |
import os
import logging
from typing import Iterator
from llama_index import (
LLMPredictor,
StorageContext,
VectorStoreIndex,
load_index_from_storage,
get_response_synthesizer,
ServiceContext,
)
from langchain.chat_models import ChatOpenAI
from llama_index.indices.postprocessor import SimilarityPostprocessor
from llama_index.query_engine.retriever_query_engine import RetrieverQueryEngine
from llama_index.retrievers import VectorIndexRetriever
from llama_index.callbacks.base import CallbackManager
from llama_index.callbacks.base import BaseCallbackHandler
from llama_index import GPTVectorStoreIndex, SimpleDirectoryReader
from llama_index.node_parser import SimpleNodeParser
from llama_index.text_splitter import TokenTextSplitter
# Set up logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
class QuantSimpleVectorStorage:
"""
A class that represents a simple vector storage for a GPT model.
Attributes:
persist_dir (str): The directory where the index and other data will be persisted.
gpt_model (str): The name of the GPT model to use for predictions.
gpt_temperature (float): The temperature to use for GPT predictions.
source_folder (str): The folder containing the markdown files to use for indexing.
"""
def __init__(self, persist_dir: str, gpt_model: str, gpt_temperature: float, source_folder: str):
# collect arguments
self.persist_dir = persist_dir
self.gpt_model = gpt_model
self.gpt_temperature = gpt_temperature
self.source_folder = source_folder
# initialize attributes
self.index = None
self.llm_predictor = self.create_llm_predictor()
# setup index
self.setup_index()
def list_sources(self) -> Iterator[str]:
"""
Returns an iterator over the paths of all markdown files in the source folder, excluding certain files.
Excluded files:
- api.md
- all_pages.md
- unknown.md
- chainlit.md
"""
exclude_files = {"api.md", "all_pages.md", "unknown.md", "chainlit.md"}
for root, _, files in os.walk(self.source_folder):
for file in files:
if file.endswith(".md") and file not in exclude_files:
yield os.path.join(root, file)
def create_llm_predictor(self) -> LLMPredictor:
"""
Sets up and returns an instance of the LLMPredictor class, which uses the ChatOpenAI class
to generate predictions based on the GPT model specified by the `gpt_model` attribute of this
        QuantSimpleVectorStorage instance.
Returns:
An instance of the LLMPredictor class.
"""
return LLMPredictor(
llm=ChatOpenAI(
temperature=self.gpt_temperature,
model_name=self.gpt_model,
max_tokens=2048,
streaming=True,
),
)
def load_index_nodes(self):
logger.info('Loading documents...')
text_splitter = TokenTextSplitter(
separator="\n## ", chunk_size=1024, chunk_overlap=0)
node_parser = SimpleNodeParser.from_defaults(
text_splitter=text_splitter,
)
documents = SimpleDirectoryReader(
input_files=self.list_sources()).load_data()
index_nodes = node_parser.get_nodes_from_documents(
documents, show_progress=True)
return index_nodes
def create_index(self):
logger.info('Building index...')
index = VectorStoreIndex(
nodes=self.load_index_nodes(),
show_progress=True,
service_context=ServiceContext.from_defaults(
llm_predictor=self.llm_predictor,
)
)
return index
def setup_index(self):
"""
Sets up the index for the vector store. If the index is already present in the storage context, it is loaded
from there. Otherwise, a new index is built from the markdown files in the input directory and saved to the
storage context for future use.
"""
try:
logger.info('Loading index...')
storage_context = StorageContext.from_defaults(
persist_dir=self.persist_dir)
self.index = load_index_from_storage(storage_context)
except Exception as e:
logger.info('Persisted Index not found, building new one.')
# create index
self.index = self.create_index()
logger.info('Saving index...')
self.index.storage_context.persist(persist_dir=self.persist_dir)
def create_service_context(self, callback_handler: BaseCallbackHandler = None) -> ServiceContext:
"""
Creates a new ServiceContext instance with default settings.
Returns:
A new ServiceContext instance.
"""
llm_predictor = self.create_llm_predictor()
return ServiceContext.from_defaults(
llm_predictor=llm_predictor,
chunk_size=1024,
callback_manager=CallbackManager([callback_handler])
)
def create_query_engine(self, callback_handler: BaseCallbackHandler = None) -> RetrieverQueryEngine:
"""
Creates a RetrieverQueryEngine object with the configured VectorIndexRetriever and response synthesizer.
Returns:
RetrieverQueryEngine: The created RetrieverQueryEngine object.
"""
service_context = self.create_service_context(callback_handler)
# Configure retriever within the service context
retriever = VectorIndexRetriever(
index=self.index,
similarity_top_k=20,
)
# Configure response synthesizer within the service context
response_synthesizer = get_response_synthesizer(
response_mode="tree_summarize", service_context=service_context)
# Assemble query engine
query_engine = RetrieverQueryEngine.from_args(
streaming=True,
retriever=retriever,
response_synthesizer=response_synthesizer,
service_context=service_context,
node_postprocessors=[
SimilarityPostprocessor(similarity_cutoff=0.73)
]
)
return query_engine
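# Hypothetical usage sketch; the directory paths and model name below are placeholders.
#
#   store = QuantSimpleVectorStorage(
#       persist_dir="storage/quant_index",
#       gpt_model="gpt-3.5-turbo",
#       gpt_temperature=0.0,
#       source_folder="docs",
#   )
#   query_engine = store.create_query_engine()
#   streaming_response = query_engine.query("What changed in the latest release notes?")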
| [
"llama_index.get_response_synthesizer",
"llama_index.ServiceContext.from_defaults",
"llama_index.retrievers.VectorIndexRetriever",
"llama_index.StorageContext.from_defaults",
"llama_index.text_splitter.TokenTextSplitter",
"llama_index.node_parser.SimpleNodeParser.from_defaults",
"llama_index.callbacks.base.CallbackManager",
"llama_index.indices.postprocessor.SimilarityPostprocessor",
"llama_index.load_index_from_storage"
] | [((780, 819), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO'}), '(level=logging.INFO)\n', (799, 819), False, 'import logging\n'), ((829, 856), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (846, 856), False, 'import logging\n'), ((2237, 2264), 'os.walk', 'os.walk', (['self.source_folder'], {}), '(self.source_folder)\n', (2244, 2264), False, 'import os\n'), ((3150, 3220), 'llama_index.text_splitter.TokenTextSplitter', 'TokenTextSplitter', ([], {'separator': '"""\n## """', 'chunk_size': '(1024)', 'chunk_overlap': '(0)'}), "(separator='\\n## ', chunk_size=1024, chunk_overlap=0)\n", (3167, 3220), False, 'from llama_index.text_splitter import TokenTextSplitter\n'), ((3257, 3316), 'llama_index.node_parser.SimpleNodeParser.from_defaults', 'SimpleNodeParser.from_defaults', ([], {'text_splitter': 'text_splitter'}), '(text_splitter=text_splitter)\n', (3287, 3316), False, 'from llama_index.node_parser import SimpleNodeParser\n'), ((5753, 5812), 'llama_index.retrievers.VectorIndexRetriever', 'VectorIndexRetriever', ([], {'index': 'self.index', 'similarity_top_k': '(20)'}), '(index=self.index, similarity_top_k=20)\n', (5773, 5812), False, 'from llama_index.retrievers import VectorIndexRetriever\n'), ((5948, 6042), 'llama_index.get_response_synthesizer', 'get_response_synthesizer', ([], {'response_mode': '"""tree_summarize"""', 'service_context': 'service_context'}), "(response_mode='tree_summarize', service_context=\n service_context)\n", (5972, 6042), False, 'from llama_index import LLMPredictor, StorageContext, VectorStoreIndex, load_index_from_storage, get_response_synthesizer, ServiceContext\n'), ((4319, 4377), 'llama_index.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {'persist_dir': 'self.persist_dir'}), '(persist_dir=self.persist_dir)\n', (4347, 4377), False, 'from llama_index import LLMPredictor, StorageContext, VectorStoreIndex, load_index_from_storage, get_response_synthesizer, ServiceContext\n'), ((4420, 4460), 'llama_index.load_index_from_storage', 'load_index_from_storage', (['storage_context'], {}), '(storage_context)\n', (4443, 4460), False, 'from llama_index import LLMPredictor, StorageContext, VectorStoreIndex, load_index_from_storage, get_response_synthesizer, ServiceContext\n'), ((2853, 2961), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'temperature': 'self.gpt_temperature', 'model_name': 'self.gpt_model', 'max_tokens': '(2048)', 'streaming': '(True)'}), '(temperature=self.gpt_temperature, model_name=self.gpt_model,\n max_tokens=2048, streaming=True)\n', (2863, 2961), False, 'from langchain.chat_models import ChatOpenAI\n'), ((3781, 3843), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'llm_predictor': 'self.llm_predictor'}), '(llm_predictor=self.llm_predictor)\n', (3809, 3843), False, 'from llama_index import LLMPredictor, StorageContext, VectorStoreIndex, load_index_from_storage, get_response_synthesizer, ServiceContext\n'), ((5216, 5251), 'llama_index.callbacks.base.CallbackManager', 'CallbackManager', (['[callback_handler]'], {}), '([callback_handler])\n', (5231, 5251), False, 'from llama_index.callbacks.base import CallbackManager\n'), ((6350, 6397), 'llama_index.indices.postprocessor.SimilarityPostprocessor', 'SimilarityPostprocessor', ([], {'similarity_cutoff': '(0.73)'}), '(similarity_cutoff=0.73)\n', (6373, 6397), False, 'from llama_index.indices.postprocessor import SimilarityPostprocessor\n'), ((2394, 2418), 'os.path.join', 'os.path.join', 
(['root', 'file'], {}), '(root, file)\n', (2406, 2418), False, 'import os\n')] |
import json
from typing import Any, Hashable
import nest_asyncio
import pandas as pd
import requests
from llama_index import ServiceContext
from llama_index.llms import OpenAI
from ragas import metrics
from tenacity import retry, stop_after_attempt, wait_random_exponential
from tqdm import tqdm
from wandbot.evaluation.eval.correctness import (
CORRECTNESS_EVAL_TEMPLATE,
WandbCorrectnessEvaluator,
)
from wandbot.evaluation.eval.factfulness import (
FACTFULNESS_EVAL_TEMPLATE,
WandbFactfulnessEvaluator,
)
from wandbot.evaluation.eval.relevancy import (
RELEVANCY_EVAL_TEMPLATE,
WandbRelevancyEvaluator,
)
from wandbot.utils import cachew, get_logger
logger = get_logger(__name__)
nest_asyncio.apply()
EVAL_CACHE = "data/cache/eval_cache/cache.db"
service_context = ServiceContext.from_defaults(llm=OpenAI("gpt-4-1106-preview"))
correctness_evaluator = WandbCorrectnessEvaluator(
service_context=service_context,
eval_template=CORRECTNESS_EVAL_TEMPLATE,
)
faithfulness_evaluator = WandbFactfulnessEvaluator(
service_context=service_context,
eval_template=FACTFULNESS_EVAL_TEMPLATE,
)
relevancy_evaluator = WandbRelevancyEvaluator(
service_context=service_context,
eval_template=RELEVANCY_EVAL_TEMPLATE,
)
@retry(wait=wait_random_exponential(min=1, max=60), stop=stop_after_attempt(6))
@cachew(cache_path=EVAL_CACHE, logger=logger)
def get_answer(question: str, application: str = "api-eval-bharat") -> str:
url = "http://0.0.0.0:8000/query"
payload = {
"question": question,
"language": "en",
"application": application,
}
response = requests.post(url, data=json.dumps(payload))
response = response.json()
return json.dumps(response)
@cachew(cache_path=EVAL_CACHE, logger=logger)
def get_eval_record(row_str: str, application: str = "api-eval-bharat") -> str:
row = json.loads(row_str)
response = get_answer(row["question"], application=application)
response = json.loads(response)
response["ground_truths"] = row["answer"]
response["reference_notes"] = row["notes"]
response["contexts"] = [
"\nSource: " + source["source"] + " \n " + source["text"]
for source in json.loads(response["source_documents"])
]
response = json.dumps(response)
return response
def parse_answer_eval(metric: str, row: dict[str, Any]) -> dict[str, Any]:
result = {
f"{metric}_score": row.get("score"),
f"{metric}_result": row.get("passing"),
f"{metric}_reason": row.get("feedback"),
}
return result
@retry(wait=wait_random_exponential(min=1, max=60), stop=stop_after_attempt(6))
@cachew(cache_path=EVAL_CACHE, logger=logger)
def get_answer_correctness(row_str: str) -> str:
row = json.loads(row_str)
result = correctness_evaluator.evaluate(
query=row["question"],
response=row["answer"],
reference=row["ground_truths"],
contexts=row["contexts"],
reference_notes=row["reference_notes"],
)
result = parse_answer_eval("answer_correctness", result.dict())
result[
"answer_correctness_score_(ragas)"
] = metrics.answer_correctness.score_single(row)
result = json.dumps(result)
return result
@retry(wait=wait_random_exponential(min=1, max=60), stop=stop_after_attempt(6))
@cachew(cache_path=EVAL_CACHE, logger=logger)
def get_answer_relevancy(row_str: str) -> str:
row = json.loads(row_str)
result = relevancy_evaluator.evaluate(
query=row["question"],
response=row["answer"],
contexts=row["contexts"],
reference=row["ground_truths"],
)
result = parse_answer_eval("answer_relevancy", result.dict())
result[
"answer_relevancy_score_(ragas)"
] = metrics.answer_relevancy.score_single(row)
result = json.dumps(result)
return result
@retry(wait=wait_random_exponential(min=1, max=60), stop=stop_after_attempt(6))
@cachew(cache_path=EVAL_CACHE, logger=logger)
def get_answer_faithfulness(row_str: str) -> str:
row = json.loads(row_str)
result = faithfulness_evaluator.evaluate(
query=row["question"],
response=row["answer"],
contexts=row["contexts"],
reference=row["ground_truths"],
)
result = parse_answer_eval("answer_faithfulness", result.dict())
result[
"answer_faithfulness_score_(ragas)"
] = metrics.faithfulness.score_single(row)
result = json.dumps(result)
return result
@retry(wait=wait_random_exponential(min=1, max=60), stop=stop_after_attempt(6))
@cachew(cache_path=EVAL_CACHE, logger=logger)
def get_answer_similarity(row_str: str) -> str:
row = json.loads(row_str)
result = metrics.answer_similarity.score_single(row)
result = json.dumps({"answer_similarity_score_(ragas)": result})
return result
@retry(wait=wait_random_exponential(min=1, max=60), stop=stop_after_attempt(6))
@cachew(cache_path=EVAL_CACHE, logger=logger)
def get_context_precision(row_str: str) -> str:
row = json.loads(row_str)
result = metrics.context_precision.score_single(row)
result = json.dumps({"context_precision_score": result})
return result
@retry(wait=wait_random_exponential(min=1, max=60), stop=stop_after_attempt(6))
@cachew(cache_path=EVAL_CACHE, logger=logger)
def get_context_recall(row_str: str) -> str:
row = json.loads(row_str)
result = metrics.context_recall.score_single(row)
result = json.dumps({"context_recall_score": result})
return result
@cachew(cache_path=EVAL_CACHE, logger=logger)
def evaluate_row(idx: Hashable, row_str: str) -> str:
eval_result = {"idx": idx}
row = json.loads(row_str)
eval_result.update(row)
eval_result.update(json.loads(get_answer_correctness(row_str)))
eval_result.update(json.loads(get_answer_relevancy(row_str)))
eval_result.update(json.loads(get_answer_faithfulness(row_str)))
eval_result.update(json.loads(get_answer_similarity(row_str)))
eval_result.update(json.loads(get_context_precision(row_str)))
eval_result.update(json.loads(get_context_recall(row_str)))
eval_result = json.dumps(eval_result)
return eval_result
@cachew(cache_path=EVAL_CACHE, logger=logger)
def process_row(
idx: Hashable, row_str: str, application: str = "api-eval-bharat"
) -> str:
eval_record = get_eval_record(row_str, application=application)
eval_row = evaluate_row(idx, eval_record)
return eval_row
def main():
eval_results = []
df = pd.read_json(
"data/eval/wandbot_cleaned_annotated_dataset_11-12-2023.jsonl",
lines=True,
orient="records",
)
correct_df = df[
(df["is_wandb_query"] == "YES") & (df["correctness"] == "correct")
]
with open(
"data/eval/wandbot-gpt-4-1106-preview-eval-v1-1.jsonl", "w+"
) as outfile:
for idx, row in tqdm(correct_df.iterrows(), total=len(correct_df)):
try:
row_str = row.to_json()
eval_row = process_row(
idx,
row_str,
application="wandbot-gpt-4-1106-preview-eval-v1.1-bharat",
)
outfile.write(eval_row + "\n")
eval_results.append(eval_row)
except Exception as e:
print(e)
print(idx)
if __name__ == "__main__":
main()
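# Hypothetical run notes: the script assumes the wandbot API is reachable at
# http://0.0.0.0:8000/query and that an OpenAI key for gpt-4-1106-preview is set in
# the environment; per-row results are cached in data/cache/eval_cache/cache.db, so
# re-running only evaluates rows that are not yet cached.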
| [
"llama_index.llms.OpenAI"
] | [((689, 709), 'wandbot.utils.get_logger', 'get_logger', (['__name__'], {}), '(__name__)\n', (699, 709), False, 'from wandbot.utils import cachew, get_logger\n'), ((711, 731), 'nest_asyncio.apply', 'nest_asyncio.apply', ([], {}), '()\n', (729, 731), False, 'import nest_asyncio\n'), ((885, 989), 'wandbot.evaluation.eval.correctness.WandbCorrectnessEvaluator', 'WandbCorrectnessEvaluator', ([], {'service_context': 'service_context', 'eval_template': 'CORRECTNESS_EVAL_TEMPLATE'}), '(service_context=service_context, eval_template=\n CORRECTNESS_EVAL_TEMPLATE)\n', (910, 989), False, 'from wandbot.evaluation.eval.correctness import CORRECTNESS_EVAL_TEMPLATE, WandbCorrectnessEvaluator\n'), ((1021, 1125), 'wandbot.evaluation.eval.factfulness.WandbFactfulnessEvaluator', 'WandbFactfulnessEvaluator', ([], {'service_context': 'service_context', 'eval_template': 'FACTFULNESS_EVAL_TEMPLATE'}), '(service_context=service_context, eval_template=\n FACTFULNESS_EVAL_TEMPLATE)\n', (1046, 1125), False, 'from wandbot.evaluation.eval.factfulness import FACTFULNESS_EVAL_TEMPLATE, WandbFactfulnessEvaluator\n'), ((1154, 1254), 'wandbot.evaluation.eval.relevancy.WandbRelevancyEvaluator', 'WandbRelevancyEvaluator', ([], {'service_context': 'service_context', 'eval_template': 'RELEVANCY_EVAL_TEMPLATE'}), '(service_context=service_context, eval_template=\n RELEVANCY_EVAL_TEMPLATE)\n', (1177, 1254), False, 'from wandbot.evaluation.eval.relevancy import RELEVANCY_EVAL_TEMPLATE, WandbRelevancyEvaluator\n'), ((1344, 1388), 'wandbot.utils.cachew', 'cachew', ([], {'cache_path': 'EVAL_CACHE', 'logger': 'logger'}), '(cache_path=EVAL_CACHE, logger=logger)\n', (1350, 1388), False, 'from wandbot.utils import cachew, get_logger\n'), ((1743, 1787), 'wandbot.utils.cachew', 'cachew', ([], {'cache_path': 'EVAL_CACHE', 'logger': 'logger'}), '(cache_path=EVAL_CACHE, logger=logger)\n', (1749, 1787), False, 'from wandbot.utils import cachew, get_logger\n'), ((2656, 2700), 'wandbot.utils.cachew', 'cachew', ([], {'cache_path': 'EVAL_CACHE', 'logger': 'logger'}), '(cache_path=EVAL_CACHE, logger=logger)\n', (2662, 2700), False, 'from wandbot.utils import cachew, get_logger\n'), ((3325, 3369), 'wandbot.utils.cachew', 'cachew', ([], {'cache_path': 'EVAL_CACHE', 'logger': 'logger'}), '(cache_path=EVAL_CACHE, logger=logger)\n', (3331, 3369), False, 'from wandbot.utils import cachew, get_logger\n'), ((3936, 3980), 'wandbot.utils.cachew', 'cachew', ([], {'cache_path': 'EVAL_CACHE', 'logger': 'logger'}), '(cache_path=EVAL_CACHE, logger=logger)\n', (3942, 3980), False, 'from wandbot.utils import cachew, get_logger\n'), ((4556, 4600), 'wandbot.utils.cachew', 'cachew', ([], {'cache_path': 'EVAL_CACHE', 'logger': 'logger'}), '(cache_path=EVAL_CACHE, logger=logger)\n', (4562, 4600), False, 'from wandbot.utils import cachew, get_logger\n'), ((4906, 4950), 'wandbot.utils.cachew', 'cachew', ([], {'cache_path': 'EVAL_CACHE', 'logger': 'logger'}), '(cache_path=EVAL_CACHE, logger=logger)\n', (4912, 4950), False, 'from wandbot.utils import cachew, get_logger\n'), ((5248, 5292), 'wandbot.utils.cachew', 'cachew', ([], {'cache_path': 'EVAL_CACHE', 'logger': 'logger'}), '(cache_path=EVAL_CACHE, logger=logger)\n', (5254, 5292), False, 'from wandbot.utils import cachew, get_logger\n'), ((5501, 5545), 'wandbot.utils.cachew', 'cachew', ([], {'cache_path': 'EVAL_CACHE', 'logger': 'logger'}), '(cache_path=EVAL_CACHE, logger=logger)\n', (5507, 5545), False, 'from wandbot.utils import cachew, get_logger\n'), ((6158, 6202), 'wandbot.utils.cachew', 'cachew', ([], 
{'cache_path': 'EVAL_CACHE', 'logger': 'logger'}), '(cache_path=EVAL_CACHE, logger=logger)\n', (6164, 6202), False, 'from wandbot.utils import cachew, get_logger\n'), ((1719, 1739), 'json.dumps', 'json.dumps', (['response'], {}), '(response)\n', (1729, 1739), False, 'import json\n'), ((1878, 1897), 'json.loads', 'json.loads', (['row_str'], {}), '(row_str)\n', (1888, 1897), False, 'import json\n'), ((1981, 2001), 'json.loads', 'json.loads', (['response'], {}), '(response)\n', (1991, 2001), False, 'import json\n'), ((2274, 2294), 'json.dumps', 'json.dumps', (['response'], {}), '(response)\n', (2284, 2294), False, 'import json\n'), ((2760, 2779), 'json.loads', 'json.loads', (['row_str'], {}), '(row_str)\n', (2770, 2779), False, 'import json\n'), ((3147, 3191), 'ragas.metrics.answer_correctness.score_single', 'metrics.answer_correctness.score_single', (['row'], {}), '(row)\n', (3186, 3191), False, 'from ragas import metrics\n'), ((3205, 3223), 'json.dumps', 'json.dumps', (['result'], {}), '(result)\n', (3215, 3223), False, 'import json\n'), ((3427, 3446), 'json.loads', 'json.loads', (['row_str'], {}), '(row_str)\n', (3437, 3446), False, 'import json\n'), ((3760, 3802), 'ragas.metrics.answer_relevancy.score_single', 'metrics.answer_relevancy.score_single', (['row'], {}), '(row)\n', (3797, 3802), False, 'from ragas import metrics\n'), ((3816, 3834), 'json.dumps', 'json.dumps', (['result'], {}), '(result)\n', (3826, 3834), False, 'import json\n'), ((4041, 4060), 'json.loads', 'json.loads', (['row_str'], {}), '(row_str)\n', (4051, 4060), False, 'import json\n'), ((4384, 4422), 'ragas.metrics.faithfulness.score_single', 'metrics.faithfulness.score_single', (['row'], {}), '(row)\n', (4417, 4422), False, 'from ragas import metrics\n'), ((4436, 4454), 'json.dumps', 'json.dumps', (['result'], {}), '(result)\n', (4446, 4454), False, 'import json\n'), ((4659, 4678), 'json.loads', 'json.loads', (['row_str'], {}), '(row_str)\n', (4669, 4678), False, 'import json\n'), ((4692, 4735), 'ragas.metrics.answer_similarity.score_single', 'metrics.answer_similarity.score_single', (['row'], {}), '(row)\n', (4730, 4735), False, 'from ragas import metrics\n'), ((4749, 4804), 'json.dumps', 'json.dumps', (["{'answer_similarity_score_(ragas)': result}"], {}), "({'answer_similarity_score_(ragas)': result})\n", (4759, 4804), False, 'import json\n'), ((5009, 5028), 'json.loads', 'json.loads', (['row_str'], {}), '(row_str)\n', (5019, 5028), False, 'import json\n'), ((5042, 5085), 'ragas.metrics.context_precision.score_single', 'metrics.context_precision.score_single', (['row'], {}), '(row)\n', (5080, 5085), False, 'from ragas import metrics\n'), ((5099, 5146), 'json.dumps', 'json.dumps', (["{'context_precision_score': result}"], {}), "({'context_precision_score': result})\n", (5109, 5146), False, 'import json\n'), ((5348, 5367), 'json.loads', 'json.loads', (['row_str'], {}), '(row_str)\n', (5358, 5367), False, 'import json\n'), ((5381, 5421), 'ragas.metrics.context_recall.score_single', 'metrics.context_recall.score_single', (['row'], {}), '(row)\n', (5416, 5421), False, 'from ragas import metrics\n'), ((5435, 5479), 'json.dumps', 'json.dumps', (["{'context_recall_score': result}"], {}), "({'context_recall_score': result})\n", (5445, 5479), False, 'import json\n'), ((5641, 5660), 'json.loads', 'json.loads', (['row_str'], {}), '(row_str)\n', (5651, 5660), False, 'import json\n'), ((6108, 6131), 'json.dumps', 'json.dumps', (['eval_result'], {}), '(eval_result)\n', (6118, 6131), False, 'import json\n'), ((6480, 6590), 
'pandas.read_json', 'pd.read_json', (['"""data/eval/wandbot_cleaned_annotated_dataset_11-12-2023.jsonl"""'], {'lines': '(True)', 'orient': '"""records"""'}), "('data/eval/wandbot_cleaned_annotated_dataset_11-12-2023.jsonl',\n lines=True, orient='records')\n", (6492, 6590), True, 'import pandas as pd\n'), ((831, 859), 'llama_index.llms.OpenAI', 'OpenAI', (['"""gpt-4-1106-preview"""'], {}), "('gpt-4-1106-preview')\n", (837, 859), False, 'from llama_index.llms import OpenAI\n'), ((1275, 1313), 'tenacity.wait_random_exponential', 'wait_random_exponential', ([], {'min': '(1)', 'max': '(60)'}), '(min=1, max=60)\n', (1298, 1313), False, 'from tenacity import retry, stop_after_attempt, wait_random_exponential\n'), ((1320, 1341), 'tenacity.stop_after_attempt', 'stop_after_attempt', (['(6)'], {}), '(6)\n', (1338, 1341), False, 'from tenacity import retry, stop_after_attempt, wait_random_exponential\n'), ((2587, 2625), 'tenacity.wait_random_exponential', 'wait_random_exponential', ([], {'min': '(1)', 'max': '(60)'}), '(min=1, max=60)\n', (2610, 2625), False, 'from tenacity import retry, stop_after_attempt, wait_random_exponential\n'), ((2632, 2653), 'tenacity.stop_after_attempt', 'stop_after_attempt', (['(6)'], {}), '(6)\n', (2650, 2653), False, 'from tenacity import retry, stop_after_attempt, wait_random_exponential\n'), ((3256, 3294), 'tenacity.wait_random_exponential', 'wait_random_exponential', ([], {'min': '(1)', 'max': '(60)'}), '(min=1, max=60)\n', (3279, 3294), False, 'from tenacity import retry, stop_after_attempt, wait_random_exponential\n'), ((3301, 3322), 'tenacity.stop_after_attempt', 'stop_after_attempt', (['(6)'], {}), '(6)\n', (3319, 3322), False, 'from tenacity import retry, stop_after_attempt, wait_random_exponential\n'), ((3867, 3905), 'tenacity.wait_random_exponential', 'wait_random_exponential', ([], {'min': '(1)', 'max': '(60)'}), '(min=1, max=60)\n', (3890, 3905), False, 'from tenacity import retry, stop_after_attempt, wait_random_exponential\n'), ((3912, 3933), 'tenacity.stop_after_attempt', 'stop_after_attempt', (['(6)'], {}), '(6)\n', (3930, 3933), False, 'from tenacity import retry, stop_after_attempt, wait_random_exponential\n'), ((4487, 4525), 'tenacity.wait_random_exponential', 'wait_random_exponential', ([], {'min': '(1)', 'max': '(60)'}), '(min=1, max=60)\n', (4510, 4525), False, 'from tenacity import retry, stop_after_attempt, wait_random_exponential\n'), ((4532, 4553), 'tenacity.stop_after_attempt', 'stop_after_attempt', (['(6)'], {}), '(6)\n', (4550, 4553), False, 'from tenacity import retry, stop_after_attempt, wait_random_exponential\n'), ((4837, 4875), 'tenacity.wait_random_exponential', 'wait_random_exponential', ([], {'min': '(1)', 'max': '(60)'}), '(min=1, max=60)\n', (4860, 4875), False, 'from tenacity import retry, stop_after_attempt, wait_random_exponential\n'), ((4882, 4903), 'tenacity.stop_after_attempt', 'stop_after_attempt', (['(6)'], {}), '(6)\n', (4900, 4903), False, 'from tenacity import retry, stop_after_attempt, wait_random_exponential\n'), ((5179, 5217), 'tenacity.wait_random_exponential', 'wait_random_exponential', ([], {'min': '(1)', 'max': '(60)'}), '(min=1, max=60)\n', (5202, 5217), False, 'from tenacity import retry, stop_after_attempt, wait_random_exponential\n'), ((5224, 5245), 'tenacity.stop_after_attempt', 'stop_after_attempt', (['(6)'], {}), '(6)\n', (5242, 5245), False, 'from tenacity import retry, stop_after_attempt, wait_random_exponential\n'), ((1656, 1675), 'json.dumps', 'json.dumps', (['payload'], {}), '(payload)\n', (1666, 1675), 
False, 'import json\n'), ((2212, 2252), 'json.loads', 'json.loads', (["response['source_documents']"], {}), "(response['source_documents'])\n", (2222, 2252), False, 'import json\n')] |
from typing import List
from fastapi.responses import StreamingResponse
from llama_index.chat_engine.types import BaseChatEngine
from app.engine.index import get_chat_engine
from fastapi import APIRouter, Depends, HTTPException, Request, status
from llama_index.llms.base import ChatMessage
from llama_index.llms.types import MessageRole
from pydantic import BaseModel
chat_router = r = APIRouter()
class _Message(BaseModel):
role: MessageRole
content: str
class _ChatData(BaseModel):
messages: List[_Message]
@r.post("")
async def chat(
request: Request,
data: _ChatData,
chat_engine: BaseChatEngine = Depends(get_chat_engine),
):
# check preconditions and get last message
if len(data.messages) == 0:
raise HTTPException(
status_code=status.HTTP_400_BAD_REQUEST,
detail="No messages provided",
)
lastMessage = data.messages.pop()
if lastMessage.role != MessageRole.USER:
raise HTTPException(
status_code=status.HTTP_400_BAD_REQUEST,
detail="Last message must be from user",
)
# convert messages coming from the request to type ChatMessage
messages = [
ChatMessage(
role=m.role,
content=m.content,
)
for m in data.messages
]
# query chat engine
response = await chat_engine.astream_chat(lastMessage.content, messages)
# stream response
async def event_generator():
async for token in response.async_response_gen():
# If client closes connection, stop sending events
if await request.is_disconnected():
break
yield token
return StreamingResponse(event_generator(), media_type="text/plain")
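# Hypothetical request sketch (the path depends on where chat_router is mounted,
# e.g. under /api/chat):
#
#   POST /api/chat
#   {"messages": [{"role": "user", "content": "What does this codebase do?"}]}
#
# The last message must have role "user"; the reply is streamed as text/plain tokens.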
| [
"llama_index.llms.base.ChatMessage"
] | [((390, 401), 'fastapi.APIRouter', 'APIRouter', ([], {}), '()\n', (399, 401), False, 'from fastapi import APIRouter, Depends, HTTPException, Request, status\n'), ((636, 660), 'fastapi.Depends', 'Depends', (['get_chat_engine'], {}), '(get_chat_engine)\n', (643, 660), False, 'from fastapi import APIRouter, Depends, HTTPException, Request, status\n'), ((758, 848), 'fastapi.HTTPException', 'HTTPException', ([], {'status_code': 'status.HTTP_400_BAD_REQUEST', 'detail': '"""No messages provided"""'}), "(status_code=status.HTTP_400_BAD_REQUEST, detail=\n 'No messages provided')\n", (771, 848), False, 'from fastapi import APIRouter, Depends, HTTPException, Request, status\n'), ((976, 1076), 'fastapi.HTTPException', 'HTTPException', ([], {'status_code': 'status.HTTP_400_BAD_REQUEST', 'detail': '"""Last message must be from user"""'}), "(status_code=status.HTTP_400_BAD_REQUEST, detail=\n 'Last message must be from user')\n", (989, 1076), False, 'from fastapi import APIRouter, Depends, HTTPException, Request, status\n'), ((1199, 1242), 'llama_index.llms.base.ChatMessage', 'ChatMessage', ([], {'role': 'm.role', 'content': 'm.content'}), '(role=m.role, content=m.content)\n', (1210, 1242), False, 'from llama_index.llms.base import ChatMessage\n')] |
# Copyright (c) Timescale, Inc. (2023)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
import numpy as np
import streamlit as st
from streamlit.hello.utils import show_code
from llama_index.vector_stores import TimescaleVectorStore
from llama_index import ServiceContext, StorageContext
from llama_index.indices.vector_store import VectorStoreIndex
from llama_index.llms import OpenAI
from llama_index import set_global_service_context
import pandas as pd
from pathlib import Path
from datetime import datetime, timedelta
from timescale_vector import client
from typing import List, Tuple
from llama_index.schema import TextNode
from llama_index.embeddings import OpenAIEmbedding
import psycopg2
def get_repos():
with psycopg2.connect(dsn=st.secrets["TIMESCALE_SERVICE_URL"]) as connection:
# Create a cursor within the context manager
with connection.cursor() as cursor:
try:
select_data_sql = "SELECT * FROM time_machine_catalog;"
cursor.execute(select_data_sql)
except psycopg2.errors.UndefinedTable as e:
return {}
catalog_entries = cursor.fetchall()
catalog_dict = {}
for entry in catalog_entries:
repo_url, table_name = entry
catalog_dict[repo_url] = table_name
return catalog_dict
def get_auto_retriever(index, retriever_args):
from llama_index.vector_stores.types import MetadataInfo, VectorStoreInfo
vector_store_info = VectorStoreInfo(
content_info="Description of the commits to PostgreSQL. Describes changes made to Postgres",
metadata_info=[
MetadataInfo(
name="commit_hash",
type="str",
description="Commit Hash",
),
MetadataInfo(
name="author",
type="str",
description="Author of the commit",
),
MetadataInfo(
name="__start_date",
type="datetime in iso format",
description="All results will be after this datetime",
),
MetadataInfo(
name="__end_date",
type="datetime in iso format",
description="All results will be before this datetime",
)
],
)
from llama_index.indices.vector_store.retrievers import VectorIndexAutoRetriever
retriever = VectorIndexAutoRetriever(index,
vector_store_info=vector_store_info,
service_context=index.service_context,
**retriever_args)
# build query engine
from llama_index.query_engine.retriever_query_engine import RetrieverQueryEngine
query_engine = RetrieverQueryEngine.from_args(
retriever=retriever, service_context=index.service_context
)
from llama_index.tools.query_engine import QueryEngineTool
# convert query engine to tool
query_engine_tool = QueryEngineTool.from_defaults(query_engine=query_engine)
from llama_index.agent import OpenAIAgent
chat_engine = OpenAIAgent.from_tools(
tools=[query_engine_tool],
llm=index.service_context.llm,
verbose=True
#service_context=index.service_context
)
return chat_engine
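# Hypothetical call sketch: retriever_args is forwarded to VectorIndexAutoRetriever,
# which is how tm_demo() below applies the month filter (values are illustrative):
#
#   args = {
#       "similarity_top_k": 20,
#       "vector_store_kwargs": {
#           "start_date": datetime.now() - timedelta(weeks=12),
#           "end_date": datetime.now(),
#       },
#   }
#   chat_engine = get_auto_retriever(index, args)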
def tm_demo():
repos = get_repos()
months = st.sidebar.slider('How many months back to search (0=no limit)?', 0, 130, 0)
if "config_months" not in st.session_state.keys() or months != st.session_state.config_months:
st.session_state.clear()
topk = st.sidebar.slider('How many commits to retrieve', 1, 150, 20)
if "config_topk" not in st.session_state.keys() or topk != st.session_state.config_topk:
st.session_state.clear()
if len(repos) > 0:
repo = st.sidebar.selectbox("Choose a repo", repos.keys())
else:
st.error("No repositiories found, please [load some data first](/LoadData)")
return
if "config_repo" not in st.session_state.keys() or repo != st.session_state.config_repo:
st.session_state.clear()
st.session_state.config_months = months
st.session_state.config_topk = topk
st.session_state.config_repo = repo
if "messages" not in st.session_state.keys(): # Initialize the chat messages history
st.session_state.messages = [
{"role": "assistant", "content": "Please choose a repo and time filter on the sidebar and then ask me a question about the git history"}
]
vector_store = TimescaleVectorStore.from_params(
service_url=st.secrets["TIMESCALE_SERVICE_URL"],
table_name=repos[repo],
time_partition_interval=timedelta(days=7),
    )
service_context = ServiceContext.from_defaults(llm=OpenAI(model="gpt-4", temperature=0.1))
set_global_service_context(service_context)
index = VectorStoreIndex.from_vector_store(vector_store=vector_store, service_context=service_context)
#chat engine goes into the session to retain history
if "chat_engine" not in st.session_state.keys(): # Initialize the chat engine
retriever_args = {"similarity_top_k" : int(topk)}
if months > 0:
end_dt = datetime.now()
start_dt = end_dt - timedelta(weeks=4*months)
retriever_args["vector_store_kwargs"] = ({"start_date": start_dt, "end_date":end_dt})
st.session_state.chat_engine = get_auto_retriever(index, retriever_args)
#st.session_state.chat_engine = index.as_chat_engine(chat_mode="best", similarity_top_k=20, verbose=True)
if prompt := st.chat_input("Your question"): # Prompt for user input and save to chat history
st.session_state.messages.append({"role": "user", "content": prompt})
for message in st.session_state.messages: # Display the prior chat messages
with st.chat_message(message["role"]):
st.write(message["content"])
# If last message is not from assistant, generate a new response
if st.session_state.messages[-1]["role"] != "assistant":
with st.chat_message("assistant"):
with st.spinner("Thinking..."):
response = st.session_state.chat_engine.chat(prompt, function_call="query_engine_tool")
st.write(response.response)
message = {"role": "assistant", "content": response.response}
st.session_state.messages.append(message) # Add response to message history
st.set_page_config(page_title="Time machine demo", page_icon="🧑💼")
st.markdown("# Time Machine")
st.sidebar.header("Welcome to the Time Machine")
debug_llamaindex = False
if debug_llamaindex:
import logging
import sys
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))
tm_demo()
#show_code(tm_demo)
| [
"llama_index.tools.query_engine.QueryEngineTool.from_defaults",
"llama_index.llms.OpenAI",
"llama_index.vector_stores.types.MetadataInfo",
"llama_index.set_global_service_context",
"llama_index.indices.vector_store.retrievers.VectorIndexAutoRetriever",
"llama_index.agent.OpenAIAgent.from_tools",
"llama_index.indices.vector_store.VectorStoreIndex.from_vector_store",
"llama_index.query_engine.retriever_query_engine.RetrieverQueryEngine.from_args"
] | [((7098, 7170), 'streamlit.set_page_config', 'st.set_page_config', ([], {'page_title': '"""Time machine demo"""', 'page_icon': '"""🧑\u200d💼"""'}), "(page_title='Time machine demo', page_icon='🧑\\u200d💼')\n", (7116, 7170), True, 'import streamlit as st\n'), ((7166, 7195), 'streamlit.markdown', 'st.markdown', (['"""# Time Machine"""'], {}), "('# Time Machine')\n", (7177, 7195), True, 'import streamlit as st\n'), ((7196, 7244), 'streamlit.sidebar.header', 'st.sidebar.header', (['"""Welcome to the Time Machine"""'], {}), "('Welcome to the Time Machine')\n", (7213, 7244), True, 'import streamlit as st\n'), ((2991, 3120), 'llama_index.indices.vector_store.retrievers.VectorIndexAutoRetriever', 'VectorIndexAutoRetriever', (['index'], {'vector_store_info': 'vector_store_info', 'service_context': 'index.service_context'}), '(index, vector_store_info=vector_store_info,\n service_context=index.service_context, **retriever_args)\n', (3015, 3120), False, 'from llama_index.indices.vector_store.retrievers import VectorIndexAutoRetriever\n'), ((3376, 3471), 'llama_index.query_engine.retriever_query_engine.RetrieverQueryEngine.from_args', 'RetrieverQueryEngine.from_args', ([], {'retriever': 'retriever', 'service_context': 'index.service_context'}), '(retriever=retriever, service_context=index.\n service_context)\n', (3406, 3471), False, 'from llama_index.query_engine.retriever_query_engine import RetrieverQueryEngine\n'), ((3604, 3660), 'llama_index.tools.query_engine.QueryEngineTool.from_defaults', 'QueryEngineTool.from_defaults', ([], {'query_engine': 'query_engine'}), '(query_engine=query_engine)\n', (3633, 3660), False, 'from llama_index.tools.query_engine import QueryEngineTool\n'), ((3726, 3825), 'llama_index.agent.OpenAIAgent.from_tools', 'OpenAIAgent.from_tools', ([], {'tools': '[query_engine_tool]', 'llm': 'index.service_context.llm', 'verbose': '(True)'}), '(tools=[query_engine_tool], llm=index.service_context\n .llm, verbose=True)\n', (3748, 3825), False, 'from llama_index.agent import OpenAIAgent\n'), ((3975, 4051), 'streamlit.sidebar.slider', 'st.sidebar.slider', (['"""How many months back to search (0=no limit)?"""', '(0)', '(130)', '(0)'], {}), "('How many months back to search (0=no limit)?', 0, 130, 0)\n", (3992, 4051), True, 'import streamlit as st\n'), ((4197, 4258), 'streamlit.sidebar.slider', 'st.sidebar.slider', (['"""How many commits to retrieve"""', '(1)', '(150)', '(20)'], {}), "('How many commits to retrieve', 1, 150, 20)\n", (4214, 4258), True, 'import streamlit as st\n'), ((5443, 5486), 'llama_index.set_global_service_context', 'set_global_service_context', (['service_context'], {}), '(service_context)\n', (5469, 5486), False, 'from llama_index import set_global_service_context\n'), ((5499, 5597), 'llama_index.indices.vector_store.VectorStoreIndex.from_vector_store', 'VectorStoreIndex.from_vector_store', ([], {'vector_store': 'vector_store', 'service_context': 'service_context'}), '(vector_store=vector_store,\n service_context=service_context)\n', (5533, 5597), False, 'from llama_index.indices.vector_store import VectorStoreIndex\n'), ((7331, 7389), 'logging.basicConfig', 'logging.basicConfig', ([], {'stream': 'sys.stdout', 'level': 'logging.INFO'}), '(stream=sys.stdout, level=logging.INFO)\n', (7350, 7389), False, 'import logging\n'), ((1240, 1297), 'psycopg2.connect', 'psycopg2.connect', ([], {'dsn': "st.secrets['TIMESCALE_SERVICE_URL']"}), "(dsn=st.secrets['TIMESCALE_SERVICE_URL'])\n", (1256, 1297), False, 'import psycopg2\n'), ((4160, 4184), 
'streamlit.session_state.clear', 'st.session_state.clear', ([], {}), '()\n', (4182, 4184), True, 'import streamlit as st\n'), ((4360, 4384), 'streamlit.session_state.clear', 'st.session_state.clear', ([], {}), '()\n', (4382, 4384), True, 'import streamlit as st\n'), ((4502, 4578), 'streamlit.error', 'st.error', (['"""No repositiories found, please [load some data first](/LoadData)"""'], {}), "('No repositiories found, please [load some data first](/LoadData)')\n", (4510, 4578), True, 'import streamlit as st\n'), ((4700, 4724), 'streamlit.session_state.clear', 'st.session_state.clear', ([], {}), '()\n', (4722, 4724), True, 'import streamlit as st\n'), ((4881, 4904), 'streamlit.session_state.keys', 'st.session_state.keys', ([], {}), '()\n', (4902, 4904), True, 'import streamlit as st\n'), ((5693, 5716), 'streamlit.session_state.keys', 'st.session_state.keys', ([], {}), '()\n', (5714, 5716), True, 'import streamlit as st\n'), ((6233, 6263), 'streamlit.chat_input', 'st.chat_input', (['"""Your question"""'], {}), "('Your question')\n", (6246, 6263), True, 'import streamlit as st\n'), ((6322, 6391), 'streamlit.session_state.messages.append', 'st.session_state.messages.append', (["{'role': 'user', 'content': prompt}"], {}), "({'role': 'user', 'content': prompt})\n", (6354, 6391), True, 'import streamlit as st\n'), ((7425, 7465), 'logging.StreamHandler', 'logging.StreamHandler', ([], {'stream': 'sys.stdout'}), '(stream=sys.stdout)\n', (7446, 7465), False, 'import logging\n'), ((4083, 4106), 'streamlit.session_state.keys', 'st.session_state.keys', ([], {}), '()\n', (4104, 4106), True, 'import streamlit as st\n'), ((4287, 4310), 'streamlit.session_state.keys', 'st.session_state.keys', ([], {}), '()\n', (4308, 4310), True, 'import streamlit as st\n'), ((4627, 4650), 'streamlit.session_state.keys', 'st.session_state.keys', ([], {}), '()\n', (4648, 4650), True, 'import streamlit as st\n'), ((5317, 5334), 'datetime.timedelta', 'timedelta', ([], {'days': '(7)'}), '(days=7)\n', (5326, 5334), False, 'from datetime import datetime, timedelta\n'), ((5399, 5437), 'llama_index.llms.OpenAI', 'OpenAI', ([], {'model': '"""gpt-4"""', 'temperature': '(0.1)'}), "(model='gpt-4', temperature=0.1)\n", (5405, 5437), False, 'from llama_index.llms import OpenAI\n'), ((5849, 5863), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (5861, 5863), False, 'from datetime import datetime, timedelta\n'), ((6486, 6518), 'streamlit.chat_message', 'st.chat_message', (["message['role']"], {}), "(message['role'])\n", (6501, 6518), True, 'import streamlit as st\n'), ((6532, 6560), 'streamlit.write', 'st.write', (["message['content']"], {}), "(message['content'])\n", (6540, 6560), True, 'import streamlit as st\n'), ((6705, 6733), 'streamlit.chat_message', 'st.chat_message', (['"""assistant"""'], {}), "('assistant')\n", (6720, 6733), True, 'import streamlit as st\n'), ((7394, 7413), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (7411, 7413), False, 'import logging\n'), ((2185, 2256), 'llama_index.vector_stores.types.MetadataInfo', 'MetadataInfo', ([], {'name': '"""commit_hash"""', 'type': '"""str"""', 'description': '"""Commit Hash"""'}), "(name='commit_hash', type='str', description='Commit Hash')\n", (2197, 2256), False, 'from llama_index.vector_stores.types import MetadataInfo, VectorStoreInfo\n'), ((2333, 2408), 'llama_index.vector_stores.types.MetadataInfo', 'MetadataInfo', ([], {'name': '"""author"""', 'type': '"""str"""', 'description': '"""Author of the commit"""'}), "(name='author', type='str', 
description='Author of the commit')\n", (2345, 2408), False, 'from llama_index.vector_stores.types import MetadataInfo, VectorStoreInfo\n'), ((2485, 2608), 'llama_index.vector_stores.types.MetadataInfo', 'MetadataInfo', ([], {'name': '"""__start_date"""', 'type': '"""datetime in iso format"""', 'description': '"""All results will be after this datetime"""'}), "(name='__start_date', type='datetime in iso format',\n description='All results will be after this datetime')\n", (2497, 2608), False, 'from llama_index.vector_stores.types import MetadataInfo, VectorStoreInfo\n'), ((2686, 2809), 'llama_index.vector_stores.types.MetadataInfo', 'MetadataInfo', ([], {'name': '"""__end_date"""', 'type': '"""datetime in iso format"""', 'description': '"""All results will be before this datetime"""'}), "(name='__end_date', type='datetime in iso format', description=\n 'All results will be before this datetime')\n", (2698, 2809), False, 'from llama_index.vector_stores.types import MetadataInfo, VectorStoreInfo\n'), ((5896, 5923), 'datetime.timedelta', 'timedelta', ([], {'weeks': '(4 * months)'}), '(weeks=4 * months)\n', (5905, 5923), False, 'from datetime import datetime, timedelta\n'), ((6752, 6777), 'streamlit.spinner', 'st.spinner', (['"""Thinking..."""'], {}), "('Thinking...')\n", (6762, 6777), True, 'import streamlit as st\n'), ((6806, 6882), 'streamlit.session_state.chat_engine.chat', 'st.session_state.chat_engine.chat', (['prompt'], {'function_call': '"""query_engine_tool"""'}), "(prompt, function_call='query_engine_tool')\n", (6839, 6882), True, 'import streamlit as st\n'), ((6899, 6926), 'streamlit.write', 'st.write', (['response.response'], {}), '(response.response)\n', (6907, 6926), True, 'import streamlit as st\n'), ((7021, 7062), 'streamlit.session_state.messages.append', 'st.session_state.messages.append', (['message'], {}), '(message)\n', (7053, 7062), True, 'import streamlit as st\n')] |
from langchain.prompts import PromptTemplate
from langchain.output_parsers import PydanticOutputParser
from llama_index import VectorStoreIndex, ServiceContext, StorageContext
from llama_index.vector_stores import FaissVectorStore
from llama_index.tools import QueryEngineTool, ToolMetadata
from llama_index.query_engine import SubQuestionQueryEngine
from llama_index.embeddings import OpenAIEmbedding
from llama_index.schema import Document
from llama_index.node_parser import UnstructuredElementNodeParser
from src.utils import get_model, process_pdf2, generate_pydantic_model
from src.pydantic_models import FiscalYearHighlights, StrategyOutlookFutureDirection, RiskManagement, CorporateGovernanceSocialResponsibility, InnovationRnD
# from src.fields import (
# fiscal_year_fields, fiscal_year_attributes,
# strat_outlook_fields, strat_outlook_attributes,
# risk_management_fields, risk_management_attributes,
# innovation_fields, innovation_attributes
# )
from src.fields2 import (
fiscal_year, fiscal_year_attributes,
strat_outlook, strat_outlook_attributes,
risk_management, risk_management_attributes,
innovation, innovation_attributes
)
import streamlit as st
import os
import faiss
import time
from pypdf import PdfReader
st.set_page_config(page_title="Annual Report Analyzer", page_icon=":card_index_dividers:", initial_sidebar_state="expanded", layout="wide")
st.title(":card_index_dividers: Annual Report Analyzer")
st.info("""
Begin by uploading the annual report of your chosen company in PDF format. Afterward, click on 'Process PDF'. Once the document has been processed, tap on 'Analyze Report' and the system will start its magic. After a brief wait, you'll be presented with a detailed analysis and insights derived from the report for your reading.
""")
def process_pdf(pdf):
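    """Read each page of the uploaded PDF into a Document, then split the pages into element nodes."""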
file = PdfReader(pdf)
document_list = []
for page in file.pages:
document_list.append(Document(text=str(page.extract_text())))
    node_parser = UnstructuredElementNodeParser()
    nodes = node_parser.get_nodes_from_documents(document_list, show_progress=True)
return nodes
def get_vector_index(nodes, vector_store):
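    """Embed the parsed nodes into a vector index, backed by FAISS or the default in-memory store."""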
print(nodes)
llm = get_model("openai")
if vector_store == "faiss":
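        # 1536 matches the output dimension of OpenAI's text-embedding-ada-002 embeddings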
d = 1536
faiss_index = faiss.IndexFlatL2(d)
vector_store = FaissVectorStore(faiss_index=faiss_index)
storage_context = StorageContext.from_defaults(vector_store=vector_store)
# embed_model = OpenAIEmbedding()
# service_context = ServiceContext.from_defaults(embed_model=embed_model)
service_context = ServiceContext.from_defaults(llm=llm)
index = VectorStoreIndex(nodes,
service_context=service_context,
storage_context=storage_context
)
elif vector_store == "simple":
index = VectorStoreIndex.from_documents(nodes)
return index
def generate_insight(engine, insight_name, section_name, output_format):
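    """Fill the report prompt template for a single insight and query the engine with it."""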
with open("prompts/report.prompt", "r") as f:
template = f.read()
prompt_template = PromptTemplate(
template=template,
input_variables=['insight_name', 'section_name', 'output_format']
)
formatted_input = prompt_template.format(insight_name=insight_name, section_name=section_name, output_format=output_format)
print(formatted_input)
response = engine.query(formatted_input)
return response.response
def report_insights(engine, section_name, fields_to_include, section_num):
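    """Generate every toggled-on insight for one report section and return them keyed by field name."""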
fields = None
attribs = None
if section_num == 1:
fields = fiscal_year
attribs = fiscal_year_attributes
elif section_num == 2:
fields = strat_outlook
attribs = strat_outlook_attributes
elif section_num == 3:
fields = risk_management
attribs = risk_management_attributes
elif section_num == 4:
fields = innovation
attribs = innovation_attributes
ins = {}
for i, field in enumerate(attribs):
if fields_to_include[i]:
response = generate_insight(engine, field, section_name, str({field: fields[field]}))
ins[field] = response
return {
"insights": ins
}
def get_query_engine(engine):
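    """Wrap the index's query engine in a sub-question engine so compound questions get decomposed."""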
llm = get_model("openai")
service_context = ServiceContext.from_defaults(llm=llm)
query_engine_tools = [
QueryEngineTool(
query_engine=engine,
metadata=ToolMetadata(
name="Annual Report",
description=f"Provides information about the company from its annual report.",
),
),
]
s_engine = SubQuestionQueryEngine.from_defaults(
query_engine_tools=query_engine_tools,
service_context=service_context
)
return s_engine
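# Pre-create one session-state slot per insight so generated results persist across Streamlit reruns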
for insight in fiscal_year_attributes:
if insight not in st.session_state:
st.session_state[insight] = None
for insight in strat_outlook_attributes:
if insight not in st.session_state:
st.session_state[insight] = None
for insight in risk_management_attributes:
if insight not in st.session_state:
st.session_state[insight] = None
for insight in innovation_attributes:
if insight not in st.session_state:
st.session_state[insight] = None
if "end_time" not in st.session_state:
st.session_state.end_time = None
if "process_doc" not in st.session_state:
st.session_state.process_doc = False
st.sidebar.info("""
You can get your OpenAI API key [here](https://openai.com/blog/openai-api)
""")
OPENAI_API_KEY = st.sidebar.text_input("OpenAI API Key", type="password")
os.environ["OPENAI_API_KEY"] = OPENAI_API_KEY
if not OPENAI_API_KEY:
st.error("Please enter your OpenAI API Key")
if OPENAI_API_KEY:
pdfs = st.sidebar.file_uploader("Upload the annual report in PDF format", type="pdf")
st.sidebar.info("""
Example reports you can upload here:
- [Apple Inc.](https://s2.q4cdn.com/470004039/files/doc_financials/2022/q4/_10-K-2022-(As-Filed).pdf)
- [Microsoft Corporation](https://microsoft.gcs-web.com/static-files/07cf3c30-cfc3-4567-b20f-f4b0f0bd5087)
- [Tesla Inc.](https://digitalassets.tesla.com/tesla-contents/image/upload/IR/TSLA-Q4-2022-Update)
""")
if st.sidebar.button("Process Document"):
with st.spinner("Processing Document..."):
nodes = process_pdf(pdfs)
st.session_state.index = get_vector_index(nodes, vector_store="faiss")
st.session_state.process_doc = True
st.toast("Document Processsed!")
if st.session_state.process_doc:
col1, col2 = st.columns([0.25, 0.75])
with col1:
st.write("""
### Select Insights
""")
with st.expander("**Fiscal Year Highlights**", expanded=True):
performance_highlights = st.toggle("Performance Highlights")
major_events = st.toggle("Major Events")
challenges_encountered = st.toggle("Challenges Encountered")
fiscal_year_highlights_list = [performance_highlights, major_events, challenges_encountered]
with st.expander("**Strategy Outlook and Future Direction**", expanded=True):
strategic_initiatives = st.toggle("Strategic Initiatives")
market_outlook = st.toggle("Market Outlook")
product_roadmap = st.toggle("Product Roadmap")
strategy_outlook_future_direction_list = [strategic_initiatives, market_outlook, product_roadmap]
with st.expander("**Risk Management**", expanded=True):
risk_factors = st.toggle("Risk Factors")
risk_mitigation = st.toggle("Risk Mitigation")
risk_management_list = [risk_factors, risk_mitigation]
with st.expander("**Innovation and R&D**", expanded=True):
r_and_d_activities = st.toggle("R&D Activities")
innovation_focus = st.toggle("Innovation Focus")
innovation_and_rd_list = [r_and_d_activities, innovation_focus]
with col2:
if st.button("Analyze Report"):
engine = get_query_engine(st.session_state.index.as_query_engine(similarity_top_k=3))
start_time = time.time()
with st.status("**Analyzing Report...**"):
if any(fiscal_year_highlights_list):
st.write("Fiscal Year Highlights...")
for i, insight in enumerate(fiscal_year_attributes):
if st.session_state[insight]:
fiscal_year_highlights_list[i] = False
response = report_insights(engine, "Fiscal Year Highlights", fiscal_year_highlights_list, 1)
for key, value in response["insights"].items():
st.session_state[key] = value
if any(strategy_outlook_future_direction_list):
st.write("Strategy Outlook and Future Direction...")
for i, insight in enumerate(strat_outlook_attributes):
if st.session_state[insight]:
strategy_outlook_future_direction_list[i] = False
response = report_insights(engine, "Strategy Outlook and Future Direction", strategy_outlook_future_direction_list, 2)
for key, value in response["insights"].items():
st.session_state[key] = value
if any(risk_management_list):
st.write("Risk Management...")
for i, insight in enumerate(risk_management_attributes):
if st.session_state[insight]:
risk_management_list[i] = False
response = report_insights(engine, "Risk Management", risk_management_list, 3)
for key, value in response["insights"].items():
st.session_state[key] = value
if any(innovation_and_rd_list):
st.write("Innovation and R&D...")
for i, insight in enumerate(innovation_attributes):
if st.session_state[insight]:
innovation_and_rd_list[i] = False
response = report_insights(engine, "Innovation and R&D", innovation_and_rd_list, 4)
st.session_state.innovation_and_rd = response
for key, value in response["insights"].items():
st.session_state[key] = value
st.session_state["end_time"] = "{:.2f}".format((time.time() - start_time))
st.toast("Report Analysis Complete!")
if st.session_state.end_time:
st.write("Report Analysis Time: ", st.session_state.end_time, "s")
# if st.session_state.all_report_outputs:
# st.toast("Report Analysis Complete!")
tab1, tab2, tab3, tab4 = st.tabs(["Fiscal Year Highlights", "Strategy Outlook and Future Direction", "Risk Management", "Innovation and R&D"])
with tab1:
st.write("## Fiscal Year Highlights")
try:
if performance_highlights:
if st.session_state['performance_highlights']:
st.write("### Performance Highlights")
st.write(st.session_state['performance_highlights'])
else:
st.error("fiscal Year Highlights insight has not been generated")
except:
st.error("This insight has not been generated")
try:
if major_events:
if st.session_state["major_events"]:
st.write("### Major Events")
st.write(st.session_state["major_events"])
else:
st.error("Major Events insight has not been generated")
except:
st.error("This insight has not been generated")
try:
if challenges_encountered:
if st.session_state["challenges_encountered"]:
st.write("### Challenges Encountered")
st.write(st.session_state["challenges_encountered"])
else:
st.error("Challenges Encountered insight has not been generated")
except:
st.error("This insight has not been generated")
# st.write("### Milestone Achievements")
# st.write(str(st.session_state.fiscal_year_highlights.milestone_achievements))
with tab2:
st.write("## Strategy Outlook and Future Direction")
try:
if strategic_initiatives:
if st.session_state["strategic_initiatives"]:
st.write("### Strategic Initiatives")
st.write(st.session_state["strategic_initiatives"])
else:
st.error("Strategic Initiatives insight has not been generated")
except:
st.error("This insight has not been generated")
try:
if market_outlook:
if st.session_state["market_outlook"]:
st.write("### Market Outlook")
st.write(st.session_state["market_outlook"])
else:
st.error("Market Outlook insight has not been generated")
except:
st.error("This insight has not been generated")
try:
if product_roadmap:
if st.session_state["product_roadmap"]:
st.write("### Product Roadmap")
st.write(st.session_state["product_roadmap"])
else:
st.error("Product Roadmap insight has not been generated")
except:
st.error("This insight has not been generated")
with tab3:
st.write("## Risk Management")
try:
if risk_factors:
if st.session_state["risk_factors"]:
st.write("### Risk Factors")
st.write(st.session_state["risk_factors"])
else:
st.error("Risk Factors insight has not been generated")
except:
st.error("This insight has not been generated")
try:
if risk_mitigation:
if st.session_state["risk_mitigation"]:
st.write("### Risk Mitigation")
st.write(st.session_state["risk_mitigation"])
else:
st.error("Risk Mitigation insight has not been generated")
except:
st.error("This insight has not been generated")
with tab4:
st.write("## Innovation and R&D")
try:
if r_and_d_activities:
if st.session_state["r_and_d_activities"]:
st.write("### R&D Activities")
st.write(st.session_state["r_and_d_activities"])
else:
st.error("R&D Activities insight has not been generated")
except:
st.error("This insight has not been generated")
try:
if innovation_focus:
if st.session_state["innovation_focus"]:
st.write("### Innovation Focus")
st.write(st.session_state["innovation_focus"])
else:
st.error("Innovation Focus insight has not been generated")
except:
st.error("This insight has not been generated")
| [
"llama_index.VectorStoreIndex.from_documents",
"llama_index.vector_stores.FaissVectorStore",
"llama_index.node_parser.UnstructuredElementNodeParser",
"llama_index.ServiceContext.from_defaults",
"llama_index.tools.ToolMetadata",
"llama_index.StorageContext.from_defaults",
"llama_index.VectorStoreIndex",
"llama_index.query_engine.SubQuestionQueryEngine.from_defaults"
] | [((1272, 1416), 'streamlit.set_page_config', 'st.set_page_config', ([], {'page_title': '"""Annual Report Analyzer"""', 'page_icon': '""":card_index_dividers:"""', 'initial_sidebar_state': '"""expanded"""', 'layout': '"""wide"""'}), "(page_title='Annual Report Analyzer', page_icon=\n ':card_index_dividers:', initial_sidebar_state='expanded', layout='wide')\n", (1290, 1416), True, 'import streamlit as st\n'), ((1413, 1469), 'streamlit.title', 'st.title', (['""":card_index_dividers: Annual Report Analyzer"""'], {}), "(':card_index_dividers: Annual Report Analyzer')\n", (1421, 1469), True, 'import streamlit as st\n'), ((1470, 1825), 'streamlit.info', 'st.info', (['"""\nBegin by uploading the annual report of your chosen company in PDF format. Afterward, click on \'Process PDF\'. Once the document has been processed, tap on \'Analyze Report\' and the system will start its magic. After a brief wait, you\'ll be presented with a detailed analysis and insights derived from the report for your reading.\n"""'], {}), '(\n """\nBegin by uploading the annual report of your chosen company in PDF format. Afterward, click on \'Process PDF\'. Once the document has been processed, tap on \'Analyze Report\' and the system will start its magic. After a brief wait, you\'ll be presented with a detailed analysis and insights derived from the report for your reading.\n"""\n )\n', (1477, 1825), True, 'import streamlit as st\n'), ((5455, 5564), 'streamlit.sidebar.info', 'st.sidebar.info', (['"""\nYou can get your OpenAI API key [here](https://openai.com/blog/openai-api)\n"""'], {}), '(\n """\nYou can get your OpenAI API key [here](https://openai.com/blog/openai-api)\n"""\n )\n', (5470, 5564), True, 'import streamlit as st\n'), ((5572, 5628), 'streamlit.sidebar.text_input', 'st.sidebar.text_input', (['"""OpenAI API Key"""'], {'type': '"""password"""'}), "('OpenAI API Key', type='password')\n", (5593, 5628), True, 'import streamlit as st\n'), ((1850, 1864), 'pypdf.PdfReader', 'PdfReader', (['pdf'], {}), '(pdf)\n', (1859, 1864), False, 'from pypdf import PdfReader\n'), ((2005, 2036), 'llama_index.node_parser.UnstructuredElementNodeParser', 'UnstructuredElementNodeParser', ([], {}), '()\n', (2034, 2036), False, 'from llama_index.node_parser import UnstructuredElementNodeParser\n'), ((2214, 2233), 'src.utils.get_model', 'get_model', (['"""openai"""'], {}), "('openai')\n", (2223, 2233), False, 'from src.utils import get_model, process_pdf2, generate_pydantic_model\n'), ((3089, 3193), 'langchain.prompts.PromptTemplate', 'PromptTemplate', ([], {'template': 'template', 'input_variables': "['insight_name', 'section_name', 'output_format']"}), "(template=template, input_variables=['insight_name',\n 'section_name', 'output_format'])\n", (3103, 3193), False, 'from langchain.prompts import PromptTemplate\n'), ((4263, 4282), 'src.utils.get_model', 'get_model', (['"""openai"""'], {}), "('openai')\n", (4272, 4282), False, 'from src.utils import get_model, process_pdf2, generate_pydantic_model\n'), ((4305, 4342), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'llm': 'llm'}), '(llm=llm)\n', (4333, 4342), False, 'from llama_index import VectorStoreIndex, ServiceContext, StorageContext\n'), ((4646, 4758), 'llama_index.query_engine.SubQuestionQueryEngine.from_defaults', 'SubQuestionQueryEngine.from_defaults', ([], {'query_engine_tools': 'query_engine_tools', 'service_context': 'service_context'}), '(query_engine_tools=query_engine_tools,\n service_context=service_context)\n', (4682, 4758), False, 
'from llama_index.query_engine import SubQuestionQueryEngine\n'), ((5703, 5747), 'streamlit.error', 'st.error', (['"""Please enter your OpenAI API Key"""'], {}), "('Please enter your OpenAI API Key')\n", (5711, 5747), True, 'import streamlit as st\n'), ((5779, 5857), 'streamlit.sidebar.file_uploader', 'st.sidebar.file_uploader', (['"""Upload the annual report in PDF format"""'], {'type': '"""pdf"""'}), "('Upload the annual report in PDF format', type='pdf')\n", (5803, 5857), True, 'import streamlit as st\n'), ((5862, 6262), 'streamlit.sidebar.info', 'st.sidebar.info', (['"""\n Example reports you can upload here: \n - [Apple Inc.](https://s2.q4cdn.com/470004039/files/doc_financials/2022/q4/_10-K-2022-(As-Filed).pdf)\n - [Microsoft Corporation](https://microsoft.gcs-web.com/static-files/07cf3c30-cfc3-4567-b20f-f4b0f0bd5087)\n - [Tesla Inc.](https://digitalassets.tesla.com/tesla-contents/image/upload/IR/TSLA-Q4-2022-Update)\n """'], {}), '(\n """\n Example reports you can upload here: \n - [Apple Inc.](https://s2.q4cdn.com/470004039/files/doc_financials/2022/q4/_10-K-2022-(As-Filed).pdf)\n - [Microsoft Corporation](https://microsoft.gcs-web.com/static-files/07cf3c30-cfc3-4567-b20f-f4b0f0bd5087)\n - [Tesla Inc.](https://digitalassets.tesla.com/tesla-contents/image/upload/IR/TSLA-Q4-2022-Update)\n """\n )\n', (5877, 6262), True, 'import streamlit as st\n'), ((6261, 6298), 'streamlit.sidebar.button', 'st.sidebar.button', (['"""Process Document"""'], {}), "('Process Document')\n", (6278, 6298), True, 'import streamlit as st\n'), ((2305, 2325), 'faiss.IndexFlatL2', 'faiss.IndexFlatL2', (['d'], {}), '(d)\n', (2322, 2325), False, 'import faiss\n'), ((2349, 2390), 'llama_index.vector_stores.FaissVectorStore', 'FaissVectorStore', ([], {'faiss_index': 'faiss_index'}), '(faiss_index=faiss_index)\n', (2365, 2390), False, 'from llama_index.vector_stores import FaissVectorStore\n'), ((2417, 2472), 'llama_index.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {'vector_store': 'vector_store'}), '(vector_store=vector_store)\n', (2445, 2472), False, 'from llama_index import VectorStoreIndex, ServiceContext, StorageContext\n'), ((2623, 2660), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'llm': 'llm'}), '(llm=llm)\n', (2651, 2660), False, 'from llama_index import VectorStoreIndex, ServiceContext, StorageContext\n'), ((2678, 2772), 'llama_index.VectorStoreIndex', 'VectorStoreIndex', (['nodes'], {'service_context': 'service_context', 'storage_context': 'storage_context'}), '(nodes, service_context=service_context, storage_context=\n storage_context)\n', (2694, 2772), False, 'from llama_index import VectorStoreIndex, ServiceContext, StorageContext\n'), ((6542, 6574), 'streamlit.toast', 'st.toast', (['"""Document Processsed!"""'], {}), "('Document Processsed!')\n", (6550, 6574), True, 'import streamlit as st\n'), ((6636, 6660), 'streamlit.columns', 'st.columns', (['[0.25, 0.75]'], {}), '([0.25, 0.75])\n', (6646, 6660), True, 'import streamlit as st\n'), ((2853, 2891), 'llama_index.VectorStoreIndex.from_documents', 'VectorStoreIndex.from_documents', (['nodes'], {}), '(nodes)\n', (2884, 2891), False, 'from llama_index import VectorStoreIndex, ServiceContext, StorageContext\n'), ((6313, 6349), 'streamlit.spinner', 'st.spinner', (['"""Processing Document..."""'], {}), "('Processing Document...')\n", (6323, 6349), True, 'import streamlit as st\n'), ((6693, 6758), 'streamlit.write', 'st.write', (['"""\n ### Select Insights\n """'], {}), '("""\n ### Select Insights\n 
""")\n', (6701, 6758), True, 'import streamlit as st\n'), ((8153, 8180), 'streamlit.button', 'st.button', (['"""Analyze Report"""'], {}), "('Analyze Report')\n", (8162, 8180), True, 'import streamlit as st\n'), ((11243, 11364), 'streamlit.tabs', 'st.tabs', (["['Fiscal Year Highlights', 'Strategy Outlook and Future Direction',\n 'Risk Management', 'Innovation and R&D']"], {}), "(['Fiscal Year Highlights', 'Strategy Outlook and Future Direction',\n 'Risk Management', 'Innovation and R&D'])\n", (11250, 11364), True, 'import streamlit as st\n'), ((4450, 4568), 'llama_index.tools.ToolMetadata', 'ToolMetadata', ([], {'name': '"""Annual Report"""', 'description': 'f"""Provides information about the company from its annual report."""'}), "(name='Annual Report', description=\n f'Provides information about the company from its annual report.')\n", (4462, 4568), False, 'from llama_index.tools import QueryEngineTool, ToolMetadata\n'), ((6789, 6845), 'streamlit.expander', 'st.expander', (['"""**Fiscal Year Highlights**"""'], {'expanded': '(True)'}), "('**Fiscal Year Highlights**', expanded=True)\n", (6800, 6845), True, 'import streamlit as st\n'), ((6888, 6923), 'streamlit.toggle', 'st.toggle', (['"""Performance Highlights"""'], {}), "('Performance Highlights')\n", (6897, 6923), True, 'import streamlit as st\n'), ((6955, 6980), 'streamlit.toggle', 'st.toggle', (['"""Major Events"""'], {}), "('Major Events')\n", (6964, 6980), True, 'import streamlit as st\n'), ((7022, 7057), 'streamlit.toggle', 'st.toggle', (['"""Challenges Encountered"""'], {}), "('Challenges Encountered')\n", (7031, 7057), True, 'import streamlit as st\n'), ((7186, 7257), 'streamlit.expander', 'st.expander', (['"""**Strategy Outlook and Future Direction**"""'], {'expanded': '(True)'}), "('**Strategy Outlook and Future Direction**', expanded=True)\n", (7197, 7257), True, 'import streamlit as st\n'), ((7299, 7333), 'streamlit.toggle', 'st.toggle', (['"""Strategic Initiatives"""'], {}), "('Strategic Initiatives')\n", (7308, 7333), True, 'import streamlit as st\n'), ((7367, 7394), 'streamlit.toggle', 'st.toggle', (['"""Market Outlook"""'], {}), "('Market Outlook')\n", (7376, 7394), True, 'import streamlit as st\n'), ((7429, 7457), 'streamlit.toggle', 'st.toggle', (['"""Product Roadmap"""'], {}), "('Product Roadmap')\n", (7438, 7457), True, 'import streamlit as st\n'), ((7591, 7640), 'streamlit.expander', 'st.expander', (['"""**Risk Management**"""'], {'expanded': '(True)'}), "('**Risk Management**', expanded=True)\n", (7602, 7640), True, 'import streamlit as st\n'), ((7673, 7698), 'streamlit.toggle', 'st.toggle', (['"""Risk Factors"""'], {}), "('Risk Factors')\n", (7682, 7698), True, 'import streamlit as st\n'), ((7733, 7761), 'streamlit.toggle', 'st.toggle', (['"""Risk Mitigation"""'], {}), "('Risk Mitigation')\n", (7742, 7761), True, 'import streamlit as st\n'), ((7852, 7904), 'streamlit.expander', 'st.expander', (['"""**Innovation and R&D**"""'], {'expanded': '(True)'}), "('**Innovation and R&D**', expanded=True)\n", (7863, 7904), True, 'import streamlit as st\n'), ((7943, 7970), 'streamlit.toggle', 'st.toggle', (['"""R&D Activities"""'], {}), "('R&D Activities')\n", (7952, 7970), True, 'import streamlit as st\n'), ((8006, 8035), 'streamlit.toggle', 'st.toggle', (['"""Innovation Focus"""'], {}), "('Innovation Focus')\n", (8015, 8035), True, 'import streamlit as st\n'), ((8313, 8324), 'time.time', 'time.time', ([], {}), '()\n', (8322, 8324), False, 'import time\n'), ((11022, 11088), 'streamlit.write', 'st.write', (['"""Report Analysis 
Time: """', 'st.session_state.end_time', '"""s"""'], {}), "('Report Analysis Time: ', st.session_state.end_time, 's')\n", (11030, 11088), True, 'import streamlit as st\n'), ((11432, 11469), 'streamlit.write', 'st.write', (['"""## Fiscal Year Highlights"""'], {}), "('## Fiscal Year Highlights')\n", (11440, 11469), True, 'import streamlit as st\n'), ((13138, 13190), 'streamlit.write', 'st.write', (['"""## Strategy Outlook and Future Direction"""'], {}), "('## Strategy Outlook and Future Direction')\n", (13146, 13190), True, 'import streamlit as st\n'), ((14663, 14693), 'streamlit.write', 'st.write', (['"""## Risk Management"""'], {}), "('## Risk Management')\n", (14671, 14693), True, 'import streamlit as st\n'), ((15658, 15691), 'streamlit.write', 'st.write', (['"""## Innovation and R&D"""'], {}), "('## Innovation and R&D')\n", (15666, 15691), True, 'import streamlit as st\n'), ((8224, 8282), 'streamlit.session_state.index.as_query_engine', 'st.session_state.index.as_query_engine', ([], {'similarity_top_k': '(3)'}), '(similarity_top_k=3)\n', (8262, 8282), True, 'import streamlit as st\n'), ((8347, 8383), 'streamlit.status', 'st.status', (['"""**Analyzing Report...**"""'], {}), "('**Analyzing Report...**')\n", (8356, 8383), True, 'import streamlit as st\n'), ((10913, 10950), 'streamlit.toast', 'st.toast', (['"""Report Analysis Complete!"""'], {}), "('Report Analysis Complete!')\n", (10921, 10950), True, 'import streamlit as st\n'), ((8468, 8505), 'streamlit.write', 'st.write', (['"""Fiscal Year Highlights..."""'], {}), "('Fiscal Year Highlights...')\n", (8476, 8505), True, 'import streamlit as st\n'), ((9055, 9107), 'streamlit.write', 'st.write', (['"""Strategy Outlook and Future Direction..."""'], {}), "('Strategy Outlook and Future Direction...')\n", (9063, 9107), True, 'import streamlit as st\n'), ((9678, 9708), 'streamlit.write', 'st.write', (['"""Risk Management..."""'], {}), "('Risk Management...')\n", (9686, 9708), True, 'import streamlit as st\n'), ((10249, 10282), 'streamlit.write', 'st.write', (['"""Innovation and R&D..."""'], {}), "('Innovation and R&D...')\n", (10257, 10282), True, 'import streamlit as st\n'), ((11926, 11973), 'streamlit.error', 'st.error', (['"""This insight has not been generated"""'], {}), "('This insight has not been generated')\n", (11934, 11973), True, 'import streamlit as st\n'), ((12380, 12427), 'streamlit.error', 'st.error', (['"""This insight has not been generated"""'], {}), "('This insight has not been generated')\n", (12388, 12427), True, 'import streamlit as st\n'), ((12883, 12930), 'streamlit.error', 'st.error', (['"""This insight has not been generated"""'], {}), "('This insight has not been generated')\n", (12891, 12930), True, 'import streamlit as st\n'), ((13641, 13688), 'streamlit.error', 'st.error', (['"""This insight has not been generated"""'], {}), "('This insight has not been generated')\n", (13649, 13688), True, 'import streamlit as st\n'), ((14106, 14153), 'streamlit.error', 'st.error', (['"""This insight has not been generated"""'], {}), "('This insight has not been generated')\n", (14114, 14153), True, 'import streamlit as st\n'), ((14575, 14622), 'streamlit.error', 'st.error', (['"""This insight has not been generated"""'], {}), "('This insight has not been generated')\n", (14583, 14622), True, 'import streamlit as st\n'), ((15100, 15147), 'streamlit.error', 'st.error', (['"""This insight has not been generated"""'], {}), "('This insight has not been generated')\n", (15108, 15147), True, 'import streamlit as st\n'), ((15569, 15616), 
'streamlit.error', 'st.error', (['"""This insight has not been generated"""'], {}), "('This insight has not been generated')\n", (15577, 15616), True, 'import streamlit as st\n'), ((16120, 16167), 'streamlit.error', 'st.error', (['"""This insight has not been generated"""'], {}), "('This insight has not been generated')\n", (16128, 16167), True, 'import streamlit as st\n'), ((16594, 16641), 'streamlit.error', 'st.error', (['"""This insight has not been generated"""'], {}), "('This insight has not been generated')\n", (16602, 16641), True, 'import streamlit as st\n'), ((10863, 10874), 'time.time', 'time.time', ([], {}), '()\n', (10872, 10874), False, 'import time\n'), ((11638, 11676), 'streamlit.write', 'st.write', (['"""### Performance Highlights"""'], {}), "('### Performance Highlights')\n", (11646, 11676), True, 'import streamlit as st\n'), ((11705, 11757), 'streamlit.write', 'st.write', (["st.session_state['performance_highlights']"], {}), "(st.session_state['performance_highlights'])\n", (11713, 11757), True, 'import streamlit as st\n'), ((11816, 11881), 'streamlit.error', 'st.error', (['"""fiscal Year Highlights insight has not been generated"""'], {}), "('fiscal Year Highlights insight has not been generated')\n", (11824, 11881), True, 'import streamlit as st\n'), ((12122, 12150), 'streamlit.write', 'st.write', (['"""### Major Events"""'], {}), "('### Major Events')\n", (12130, 12150), True, 'import streamlit as st\n'), ((12179, 12221), 'streamlit.write', 'st.write', (["st.session_state['major_events']"], {}), "(st.session_state['major_events'])\n", (12187, 12221), True, 'import streamlit as st\n'), ((12280, 12335), 'streamlit.error', 'st.error', (['"""Major Events insight has not been generated"""'], {}), "('Major Events insight has not been generated')\n", (12288, 12335), True, 'import streamlit as st\n'), ((12595, 12633), 'streamlit.write', 'st.write', (['"""### Challenges Encountered"""'], {}), "('### Challenges Encountered')\n", (12603, 12633), True, 'import streamlit as st\n'), ((12662, 12714), 'streamlit.write', 'st.write', (["st.session_state['challenges_encountered']"], {}), "(st.session_state['challenges_encountered'])\n", (12670, 12714), True, 'import streamlit as st\n'), ((12773, 12838), 'streamlit.error', 'st.error', (['"""Challenges Encountered insight has not been generated"""'], {}), "('Challenges Encountered insight has not been generated')\n", (12781, 12838), True, 'import streamlit as st\n'), ((13356, 13393), 'streamlit.write', 'st.write', (['"""### Strategic Initiatives"""'], {}), "('### Strategic Initiatives')\n", (13364, 13393), True, 'import streamlit as st\n'), ((13422, 13473), 'streamlit.write', 'st.write', (["st.session_state['strategic_initiatives']"], {}), "(st.session_state['strategic_initiatives'])\n", (13430, 13473), True, 'import streamlit as st\n'), ((13532, 13596), 'streamlit.error', 'st.error', (['"""Strategic Initiatives insight has not been generated"""'], {}), "('Strategic Initiatives insight has not been generated')\n", (13540, 13596), True, 'import streamlit as st\n'), ((13841, 13871), 'streamlit.write', 'st.write', (['"""### Market Outlook"""'], {}), "('### Market Outlook')\n", (13849, 13871), True, 'import streamlit as st\n'), ((13900, 13944), 'streamlit.write', 'st.write', (["st.session_state['market_outlook']"], {}), "(st.session_state['market_outlook'])\n", (13908, 13944), True, 'import streamlit as st\n'), ((14003, 14060), 'streamlit.error', 'st.error', (['"""Market Outlook insight has not been generated"""'], {}), "('Market Outlook 
insight has not been generated')\n", (14011, 14060), True, 'import streamlit as st\n'), ((14308, 14339), 'streamlit.write', 'st.write', (['"""### Product Roadmap"""'], {}), "('### Product Roadmap')\n", (14316, 14339), True, 'import streamlit as st\n'), ((14368, 14413), 'streamlit.write', 'st.write', (["st.session_state['product_roadmap']"], {}), "(st.session_state['product_roadmap'])\n", (14376, 14413), True, 'import streamlit as st\n'), ((14472, 14530), 'streamlit.error', 'st.error', (['"""Product Roadmap insight has not been generated"""'], {}), "('Product Roadmap insight has not been generated')\n", (14480, 14530), True, 'import streamlit as st\n'), ((14842, 14870), 'streamlit.write', 'st.write', (['"""### Risk Factors"""'], {}), "('### Risk Factors')\n", (14850, 14870), True, 'import streamlit as st\n'), ((14899, 14941), 'streamlit.write', 'st.write', (["st.session_state['risk_factors']"], {}), "(st.session_state['risk_factors'])\n", (14907, 14941), True, 'import streamlit as st\n'), ((15000, 15055), 'streamlit.error', 'st.error', (['"""Risk Factors insight has not been generated"""'], {}), "('Risk Factors insight has not been generated')\n", (15008, 15055), True, 'import streamlit as st\n'), ((15302, 15333), 'streamlit.write', 'st.write', (['"""### Risk Mitigation"""'], {}), "('### Risk Mitigation')\n", (15310, 15333), True, 'import streamlit as st\n'), ((15362, 15407), 'streamlit.write', 'st.write', (["st.session_state['risk_mitigation']"], {}), "(st.session_state['risk_mitigation'])\n", (15370, 15407), True, 'import streamlit as st\n'), ((15466, 15524), 'streamlit.error', 'st.error', (['"""Risk Mitigation insight has not been generated"""'], {}), "('Risk Mitigation insight has not been generated')\n", (15474, 15524), True, 'import streamlit as st\n'), ((15852, 15882), 'streamlit.write', 'st.write', (['"""### R&D Activities"""'], {}), "('### R&D Activities')\n", (15860, 15882), True, 'import streamlit as st\n'), ((15911, 15959), 'streamlit.write', 'st.write', (["st.session_state['r_and_d_activities']"], {}), "(st.session_state['r_and_d_activities'])\n", (15919, 15959), True, 'import streamlit as st\n'), ((16018, 16075), 'streamlit.error', 'st.error', (['"""R&D Activities insight has not been generated"""'], {}), "('R&D Activities insight has not been generated')\n", (16026, 16075), True, 'import streamlit as st\n'), ((16324, 16356), 'streamlit.write', 'st.write', (['"""### Innovation Focus"""'], {}), "('### Innovation Focus')\n", (16332, 16356), True, 'import streamlit as st\n'), ((16385, 16431), 'streamlit.write', 'st.write', (["st.session_state['innovation_focus']"], {}), "(st.session_state['innovation_focus'])\n", (16393, 16431), True, 'import streamlit as st\n'), ((16490, 16549), 'streamlit.error', 'st.error', (['"""Innovation Focus insight has not been generated"""'], {}), "('Innovation Focus insight has not been generated')\n", (16498, 16549), True, 'import streamlit as st\n')] |
import logging
from threading import Thread
from typing import Any, List, Optional, Type
from llama_index.core.base.base_query_engine import BaseQueryEngine
from llama_index.core.base.llms.types import ChatMessage, MessageRole
from llama_index.core.base.response.schema import RESPONSE_TYPE, StreamingResponse
from llama_index.core.callbacks import CallbackManager, trace_method
from llama_index.core.chat_engine.types import (
AgentChatResponse,
BaseChatEngine,
StreamingAgentChatResponse,
)
from llama_index.core.chat_engine.utils import response_gen_from_query_engine
from llama_index.core.embeddings.mock_embed_model import MockEmbedding
from llama_index.core.base.llms.generic_utils import messages_to_history_str
from llama_index.core.llms.llm import LLM
from llama_index.core.memory import BaseMemory, ChatMemoryBuffer
from llama_index.core.prompts.base import BasePromptTemplate, PromptTemplate
from llama_index.core.service_context import ServiceContext
from llama_index.core.service_context_elements.llm_predictor import LLMPredictorType
from llama_index.core.settings import (
Settings,
callback_manager_from_settings_or_context,
)
from llama_index.core.tools import ToolOutput
logger = logging.getLogger(__name__)
DEFAULT_TEMPLATE = """\
Given a conversation (between Human and Assistant) and a follow up message from Human, \
rewrite the message to be a standalone question that captures all relevant context \
from the conversation.
<Chat History>
{chat_history}
<Follow Up Message>
{question}
<Standalone question>
"""
DEFAULT_PROMPT = PromptTemplate(DEFAULT_TEMPLATE)
class CondenseQuestionChatEngine(BaseChatEngine):
"""Condense Question Chat Engine.
First generate a standalone question from conversation context and last message,
then query the query engine for a response.
"""
def __init__(
self,
query_engine: BaseQueryEngine,
condense_question_prompt: BasePromptTemplate,
memory: BaseMemory,
llm: LLMPredictorType,
verbose: bool = False,
callback_manager: Optional[CallbackManager] = None,
) -> None:
self._query_engine = query_engine
self._condense_question_prompt = condense_question_prompt
self._memory = memory
self._llm = llm
self._verbose = verbose
self.callback_manager = callback_manager or CallbackManager([])
@classmethod
def from_defaults(
cls,
query_engine: BaseQueryEngine,
condense_question_prompt: Optional[BasePromptTemplate] = None,
chat_history: Optional[List[ChatMessage]] = None,
memory: Optional[BaseMemory] = None,
memory_cls: Type[BaseMemory] = ChatMemoryBuffer,
service_context: Optional[ServiceContext] = None,
verbose: bool = False,
system_prompt: Optional[str] = None,
prefix_messages: Optional[List[ChatMessage]] = None,
llm: Optional[LLM] = None,
**kwargs: Any,
) -> "CondenseQuestionChatEngine":
"""Initialize a CondenseQuestionChatEngine from default parameters."""
condense_question_prompt = condense_question_prompt or DEFAULT_PROMPT
if llm is None:
service_context = service_context or ServiceContext.from_defaults(
embed_model=MockEmbedding(embed_dim=2)
)
llm = service_context.llm
else:
service_context = service_context or ServiceContext.from_defaults(
llm=llm, embed_model=MockEmbedding(embed_dim=2)
)
chat_history = chat_history or []
memory = memory or memory_cls.from_defaults(chat_history=chat_history, llm=llm)
if system_prompt is not None:
raise NotImplementedError(
"system_prompt is not supported for CondenseQuestionChatEngine."
)
if prefix_messages is not None:
raise NotImplementedError(
"prefix_messages is not supported for CondenseQuestionChatEngine."
)
return cls(
query_engine,
condense_question_prompt,
memory,
llm,
verbose=verbose,
callback_manager=callback_manager_from_settings_or_context(
Settings, service_context
),
)
def _condense_question(
self, chat_history: List[ChatMessage], last_message: str
) -> str:
"""
Generate standalone question from conversation context and last message.
"""
chat_history_str = messages_to_history_str(chat_history)
logger.debug(chat_history_str)
return self._llm.predict(
self._condense_question_prompt,
question=last_message,
chat_history=chat_history_str,
)
async def _acondense_question(
self, chat_history: List[ChatMessage], last_message: str
) -> str:
"""
Generate standalone question from conversation context and last message.
"""
chat_history_str = messages_to_history_str(chat_history)
logger.debug(chat_history_str)
return await self._llm.apredict(
self._condense_question_prompt,
question=last_message,
chat_history=chat_history_str,
)
def _get_tool_output_from_response(
self, query: str, response: RESPONSE_TYPE
) -> ToolOutput:
if isinstance(response, StreamingResponse):
return ToolOutput(
content="",
tool_name="query_engine",
raw_input={"query": query},
raw_output=response,
)
else:
return ToolOutput(
content=str(response),
tool_name="query_engine",
raw_input={"query": query},
raw_output=response,
)
@trace_method("chat")
def chat(
self, message: str, chat_history: Optional[List[ChatMessage]] = None
) -> AgentChatResponse:
chat_history = chat_history or self._memory.get()
# Generate standalone question from conversation context and last message
condensed_question = self._condense_question(chat_history, message)
log_str = f"Querying with: {condensed_question}"
logger.info(log_str)
if self._verbose:
print(log_str)
# TODO: right now, query engine uses class attribute to configure streaming,
# we are moving towards separate streaming and non-streaming methods.
# In the meanwhile, use this hack to toggle streaming.
from llama_index.core.query_engine.retriever_query_engine import (
RetrieverQueryEngine,
)
if isinstance(self._query_engine, RetrieverQueryEngine):
is_streaming = self._query_engine._response_synthesizer._streaming
self._query_engine._response_synthesizer._streaming = False
# Query with standalone question
query_response = self._query_engine.query(condensed_question)
# NOTE: reset streaming flag
if isinstance(self._query_engine, RetrieverQueryEngine):
self._query_engine._response_synthesizer._streaming = is_streaming
tool_output = self._get_tool_output_from_response(
condensed_question, query_response
)
# Record response
self._memory.put(ChatMessage(role=MessageRole.USER, content=message))
self._memory.put(
ChatMessage(role=MessageRole.ASSISTANT, content=str(query_response))
)
return AgentChatResponse(response=str(query_response), sources=[tool_output])
@trace_method("chat")
def stream_chat(
self, message: str, chat_history: Optional[List[ChatMessage]] = None
) -> StreamingAgentChatResponse:
chat_history = chat_history or self._memory.get()
# Generate standalone question from conversation context and last message
condensed_question = self._condense_question(chat_history, message)
log_str = f"Querying with: {condensed_question}"
logger.info(log_str)
if self._verbose:
print(log_str)
# TODO: right now, query engine uses class attribute to configure streaming,
# we are moving towards separate streaming and non-streaming methods.
# In the meanwhile, use this hack to toggle streaming.
from llama_index.core.query_engine.retriever_query_engine import (
RetrieverQueryEngine,
)
if isinstance(self._query_engine, RetrieverQueryEngine):
is_streaming = self._query_engine._response_synthesizer._streaming
self._query_engine._response_synthesizer._streaming = True
# Query with standalone question
query_response = self._query_engine.query(condensed_question)
# NOTE: reset streaming flag
if isinstance(self._query_engine, RetrieverQueryEngine):
self._query_engine._response_synthesizer._streaming = is_streaming
tool_output = self._get_tool_output_from_response(
condensed_question, query_response
)
# Record response
if (
isinstance(query_response, StreamingResponse)
and query_response.response_gen is not None
):
# override the generator to include writing to chat history
self._memory.put(ChatMessage(role=MessageRole.USER, content=message))
response = StreamingAgentChatResponse(
chat_stream=response_gen_from_query_engine(query_response.response_gen),
sources=[tool_output],
)
thread = Thread(
target=response.write_response_to_history, args=(self._memory, True)
)
thread.start()
else:
raise ValueError("Streaming is not enabled. Please use chat() instead.")
return response
@trace_method("chat")
async def achat(
self, message: str, chat_history: Optional[List[ChatMessage]] = None
) -> AgentChatResponse:
chat_history = chat_history or self._memory.get()
# Generate standalone question from conversation context and last message
condensed_question = await self._acondense_question(chat_history, message)
log_str = f"Querying with: {condensed_question}"
logger.info(log_str)
if self._verbose:
print(log_str)
# TODO: right now, query engine uses class attribute to configure streaming,
# we are moving towards separate streaming and non-streaming methods.
# In the meanwhile, use this hack to toggle streaming.
from llama_index.core.query_engine.retriever_query_engine import (
RetrieverQueryEngine,
)
if isinstance(self._query_engine, RetrieverQueryEngine):
is_streaming = self._query_engine._response_synthesizer._streaming
self._query_engine._response_synthesizer._streaming = False
# Query with standalone question
query_response = await self._query_engine.aquery(condensed_question)
# NOTE: reset streaming flag
if isinstance(self._query_engine, RetrieverQueryEngine):
self._query_engine._response_synthesizer._streaming = is_streaming
tool_output = self._get_tool_output_from_response(
condensed_question, query_response
)
# Record response
self._memory.put(ChatMessage(role=MessageRole.USER, content=message))
self._memory.put(
ChatMessage(role=MessageRole.ASSISTANT, content=str(query_response))
)
return AgentChatResponse(response=str(query_response), sources=[tool_output])
@trace_method("chat")
async def astream_chat(
self, message: str, chat_history: Optional[List[ChatMessage]] = None
) -> StreamingAgentChatResponse:
chat_history = chat_history or self._memory.get()
# Generate standalone question from conversation context and last message
condensed_question = await self._acondense_question(chat_history, message)
log_str = f"Querying with: {condensed_question}"
logger.info(log_str)
if self._verbose:
print(log_str)
# TODO: right now, query engine uses class attribute to configure streaming,
# we are moving towards separate streaming and non-streaming methods.
# In the meanwhile, use this hack to toggle streaming.
from llama_index.core.query_engine.retriever_query_engine import (
RetrieverQueryEngine,
)
if isinstance(self._query_engine, RetrieverQueryEngine):
is_streaming = self._query_engine._response_synthesizer._streaming
self._query_engine._response_synthesizer._streaming = True
# Query with standalone question
query_response = await self._query_engine.aquery(condensed_question)
# NOTE: reset streaming flag
if isinstance(self._query_engine, RetrieverQueryEngine):
self._query_engine._response_synthesizer._streaming = is_streaming
tool_output = self._get_tool_output_from_response(
condensed_question, query_response
)
# Record response
if (
isinstance(query_response, StreamingResponse)
and query_response.response_gen is not None
):
# override the generator to include writing to chat history
# TODO: query engine does not support async generator yet
self._memory.put(ChatMessage(role=MessageRole.USER, content=message))
response = StreamingAgentChatResponse(
chat_stream=response_gen_from_query_engine(query_response.response_gen),
sources=[tool_output],
)
thread = Thread(
target=response.write_response_to_history, args=(self._memory,)
)
thread.start()
else:
raise ValueError("Streaming is not enabled. Please use achat() instead.")
return response
def reset(self) -> None:
# Clear chat history
self._memory.reset()
@property
def chat_history(self) -> List[ChatMessage]:
"""Get chat history."""
return self._memory.get_all()
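# Illustrative usage sketch (not part of the original module), assuming an `index`
# built elsewhere, e.g. with VectorStoreIndex.from_documents(...):
#     chat_engine = CondenseQuestionChatEngine.from_defaults(
#         query_engine=index.as_query_engine(), verbose=True
#     )
#     chat_engine.chat("And what about the following year?")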
| [
"llama_index.core.tools.ToolOutput",
"llama_index.core.chat_engine.utils.response_gen_from_query_engine",
"llama_index.core.prompts.base.PromptTemplate",
"llama_index.core.callbacks.CallbackManager",
"llama_index.core.settings.callback_manager_from_settings_or_context",
"llama_index.core.base.llms.types.ChatMessage",
"llama_index.core.embeddings.mock_embed_model.MockEmbedding",
"llama_index.core.callbacks.trace_method",
"llama_index.core.base.llms.generic_utils.messages_to_history_str"
] | [((1220, 1247), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1237, 1247), False, 'import logging\n'), ((1579, 1611), 'llama_index.core.prompts.base.PromptTemplate', 'PromptTemplate', (['DEFAULT_TEMPLATE'], {}), '(DEFAULT_TEMPLATE)\n', (1593, 1611), False, 'from llama_index.core.prompts.base import BasePromptTemplate, PromptTemplate\n'), ((5895, 5915), 'llama_index.core.callbacks.trace_method', 'trace_method', (['"""chat"""'], {}), "('chat')\n", (5907, 5915), False, 'from llama_index.core.callbacks import CallbackManager, trace_method\n'), ((7693, 7713), 'llama_index.core.callbacks.trace_method', 'trace_method', (['"""chat"""'], {}), "('chat')\n", (7705, 7713), False, 'from llama_index.core.callbacks import CallbackManager, trace_method\n'), ((9987, 10007), 'llama_index.core.callbacks.trace_method', 'trace_method', (['"""chat"""'], {}), "('chat')\n", (9999, 10007), False, 'from llama_index.core.callbacks import CallbackManager, trace_method\n'), ((11806, 11826), 'llama_index.core.callbacks.trace_method', 'trace_method', (['"""chat"""'], {}), "('chat')\n", (11818, 11826), False, 'from llama_index.core.callbacks import CallbackManager, trace_method\n'), ((4566, 4603), 'llama_index.core.base.llms.generic_utils.messages_to_history_str', 'messages_to_history_str', (['chat_history'], {}), '(chat_history)\n', (4589, 4603), False, 'from llama_index.core.base.llms.generic_utils import messages_to_history_str\n'), ((5057, 5094), 'llama_index.core.base.llms.generic_utils.messages_to_history_str', 'messages_to_history_str', (['chat_history'], {}), '(chat_history)\n', (5080, 5094), False, 'from llama_index.core.base.llms.generic_utils import messages_to_history_str\n'), ((2381, 2400), 'llama_index.core.callbacks.CallbackManager', 'CallbackManager', (['[]'], {}), '([])\n', (2396, 2400), False, 'from llama_index.core.callbacks import CallbackManager, trace_method\n'), ((5491, 5592), 'llama_index.core.tools.ToolOutput', 'ToolOutput', ([], {'content': '""""""', 'tool_name': '"""query_engine"""', 'raw_input': "{'query': query}", 'raw_output': 'response'}), "(content='', tool_name='query_engine', raw_input={'query': query},\n raw_output=response)\n", (5501, 5592), False, 'from llama_index.core.tools import ToolOutput\n'), ((7430, 7481), 'llama_index.core.base.llms.types.ChatMessage', 'ChatMessage', ([], {'role': 'MessageRole.USER', 'content': 'message'}), '(role=MessageRole.USER, content=message)\n', (7441, 7481), False, 'from llama_index.core.base.llms.types import ChatMessage, MessageRole\n'), ((9724, 9800), 'threading.Thread', 'Thread', ([], {'target': 'response.write_response_to_history', 'args': '(self._memory, True)'}), '(target=response.write_response_to_history, args=(self._memory, True))\n', (9730, 9800), False, 'from threading import Thread\n'), ((11543, 11594), 'llama_index.core.base.llms.types.ChatMessage', 'ChatMessage', ([], {'role': 'MessageRole.USER', 'content': 'message'}), '(role=MessageRole.USER, content=message)\n', (11554, 11594), False, 'from llama_index.core.base.llms.types import ChatMessage, MessageRole\n'), ((13928, 13999), 'threading.Thread', 'Thread', ([], {'target': 'response.write_response_to_history', 'args': '(self._memory,)'}), '(target=response.write_response_to_history, args=(self._memory,))\n', (13934, 13999), False, 'from threading import Thread\n'), ((4216, 4284), 'llama_index.core.settings.callback_manager_from_settings_or_context', 'callback_manager_from_settings_or_context', (['Settings', 'service_context'], {}), '(Settings, 
service_context)\n', (4257, 4284), False, 'from llama_index.core.settings import Settings, callback_manager_from_settings_or_context\n'), ((9457, 9508), 'llama_index.core.base.llms.types.ChatMessage', 'ChatMessage', ([], {'role': 'MessageRole.USER', 'content': 'message'}), '(role=MessageRole.USER, content=message)\n', (9468, 9508), False, 'from llama_index.core.base.llms.types import ChatMessage, MessageRole\n'), ((13661, 13712), 'llama_index.core.base.llms.types.ChatMessage', 'ChatMessage', ([], {'role': 'MessageRole.USER', 'content': 'message'}), '(role=MessageRole.USER, content=message)\n', (13672, 13712), False, 'from llama_index.core.base.llms.types import ChatMessage, MessageRole\n'), ((9589, 9648), 'llama_index.core.chat_engine.utils.response_gen_from_query_engine', 'response_gen_from_query_engine', (['query_response.response_gen'], {}), '(query_response.response_gen)\n', (9619, 9648), False, 'from llama_index.core.chat_engine.utils import response_gen_from_query_engine\n'), ((13793, 13852), 'llama_index.core.chat_engine.utils.response_gen_from_query_engine', 'response_gen_from_query_engine', (['query_response.response_gen'], {}), '(query_response.response_gen)\n', (13823, 13852), False, 'from llama_index.core.chat_engine.utils import response_gen_from_query_engine\n'), ((3306, 3332), 'llama_index.core.embeddings.mock_embed_model.MockEmbedding', 'MockEmbedding', ([], {'embed_dim': '(2)'}), '(embed_dim=2)\n', (3319, 3332), False, 'from llama_index.core.embeddings.mock_embed_model import MockEmbedding\n'), ((3515, 3541), 'llama_index.core.embeddings.mock_embed_model.MockEmbedding', 'MockEmbedding', ([], {'embed_dim': '(2)'}), '(embed_dim=2)\n', (3528, 3541), False, 'from llama_index.core.embeddings.mock_embed_model import MockEmbedding\n')] |
from Settings import settings
from llama_index.core import VectorStoreIndex, StorageContext, load_index_from_storage, SimpleDirectoryReader, Settings, get_response_synthesizer
from llama_index.embeddings.ollama import OllamaEmbedding
from llama_index.llms.ollama import Ollama
from llama_index.readers.web import MainContentExtractorReader  # TrafilaturaWebReader, BeautifulSoupWebReader, SimpleWebPageReader
from llama_index.core.postprocessor import SimilarityPostprocessor
from Utils import displayError, displayInfo
from time import time
import os
class RAG:
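    # Small retrieval-augmented generation helper around a LlamaIndex vector index.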
def __init__(self):
Settings.embed_model = OllamaEmbedding(base_url=settings.host, model_name="nomic-embed-text")
#Settings.embed_model = HuggingFaceEmbedding(model_name=f"BAAI/bge-{settings.embed_model}-en-v1.5")
self.index = None
def load_index(self, folder):
try:
storage_context = StorageContext.from_defaults(persist_dir=folder)
self.index = load_index_from_storage(storage_context)
except Exception as e: displayError(e)
def save_index(self, folder):
try:
self.index.storage_context.persist(persist_dir=folder)
except Exception as e: displayError(e)
def loadUrl(self, url, setStatus):
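        # Fetch one URL, extract its main content and build a fresh index from it.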
try:
start = time()
documents = MainContentExtractorReader().load_data([url])
self.index = VectorStoreIndex.from_documents(documents, show_progress=True)
message = f"Indexed URL into {len(documents)} chunks in {time()-start:0.2f} seconds."
displayInfo("Index", message)
setStatus(message)
except Exception as e:
displayError(e)
setStatus("Failed to index.")
def loadFolder(self, path, setStatus):
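        # Index a folder (recursively) when given a path string, or an explicit list of files.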
try:
start = time()
if isinstance(path, str):
documents = SimpleDirectoryReader(path, recursive=True).load_data()
else:
documents = SimpleDirectoryReader(input_files=path).load_data()
self.index = VectorStoreIndex.from_documents(documents, show_progress=True)
message = f"Indexed folder into {len(documents)} chunks in {time()-start:0.2f} seconds."
displayInfo("Index", message)
setStatus(message)
except Exception as e:
displayError(e)
setStatus("Failed to index.")
def ask(self, question):
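        # First pass retrieves only (response_mode='no_text') to check that some node clears
        # the similarity cutoff; only then is the full streaming synthesis pass executed.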
node_postprocessors = [SimilarityPostprocessor(similarity_cutoff=settings.similarity_cutoff)]
query_engine = self.index.as_query_engine(similarity_top_k=settings.similarity_top_k, node_postprocessors = node_postprocessors, response_mode='no_text')
response = query_engine.query(question)
if response.source_nodes:
query_engine = self.index.as_query_engine(similarity_top_k=settings.similarity_top_k, node_postprocessors = node_postprocessors, response_mode=settings.response_mode, streaming=True)
self.response = query_engine.query(question)
return self.response.response_gen
raise Exception("No texts found for the question using the current rag settings") | [
"llama_index.core.VectorStoreIndex.from_documents",
"llama_index.readers.web.MainContentExtractorReader",
"llama_index.core.StorageContext.from_defaults",
"llama_index.core.load_index_from_storage",
"llama_index.core.postprocessor.SimilarityPostprocessor",
"llama_index.core.SimpleDirectoryReader",
"llama_index.embeddings.ollama.OllamaEmbedding"
] | [((608, 678), 'llama_index.embeddings.ollama.OllamaEmbedding', 'OllamaEmbedding', ([], {'base_url': 'settings.host', 'model_name': '"""nomic-embed-text"""'}), "(base_url=settings.host, model_name='nomic-embed-text')\n", (623, 678), False, 'from llama_index.embeddings.ollama import OllamaEmbedding\n'), ((861, 909), 'llama_index.core.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {'persist_dir': 'folder'}), '(persist_dir=folder)\n', (889, 909), False, 'from llama_index.core import VectorStoreIndex, StorageContext, load_index_from_storage, SimpleDirectoryReader, Settings, get_response_synthesizer\n'), ((926, 966), 'llama_index.core.load_index_from_storage', 'load_index_from_storage', (['storage_context'], {}), '(storage_context)\n', (949, 966), False, 'from llama_index.core import VectorStoreIndex, StorageContext, load_index_from_storage, SimpleDirectoryReader, Settings, get_response_synthesizer\n'), ((1201, 1207), 'time.time', 'time', ([], {}), '()\n', (1205, 1207), False, 'from time import time\n'), ((1285, 1347), 'llama_index.core.VectorStoreIndex.from_documents', 'VectorStoreIndex.from_documents', (['documents'], {'show_progress': '(True)'}), '(documents, show_progress=True)\n', (1316, 1347), False, 'from llama_index.core import VectorStoreIndex, StorageContext, load_index_from_storage, SimpleDirectoryReader, Settings, get_response_synthesizer\n'), ((1440, 1469), 'Utils.displayInfo', 'displayInfo', (['"""Index"""', 'message'], {}), "('Index', message)\n", (1451, 1469), False, 'from Utils import displayError, displayInfo\n'), ((1628, 1634), 'time.time', 'time', ([], {}), '()\n', (1632, 1634), False, 'from time import time\n'), ((1830, 1892), 'llama_index.core.VectorStoreIndex.from_documents', 'VectorStoreIndex.from_documents', (['documents'], {'show_progress': '(True)'}), '(documents, show_progress=True)\n', (1861, 1892), False, 'from llama_index.core import VectorStoreIndex, StorageContext, load_index_from_storage, SimpleDirectoryReader, Settings, get_response_synthesizer\n'), ((1988, 2017), 'Utils.displayInfo', 'displayInfo', (['"""Index"""', 'message'], {}), "('Index', message)\n", (1999, 2017), False, 'from Utils import displayError, displayInfo\n'), ((2169, 2238), 'llama_index.core.postprocessor.SimilarityPostprocessor', 'SimilarityPostprocessor', ([], {'similarity_cutoff': 'settings.similarity_cutoff'}), '(similarity_cutoff=settings.similarity_cutoff)\n', (2192, 2238), False, 'from llama_index.core.postprocessor import SimilarityPostprocessor\n'), ((992, 1007), 'Utils.displayError', 'displayError', (['e'], {}), '(e)\n', (1004, 1007), False, 'from Utils import displayError, displayInfo\n'), ((1130, 1145), 'Utils.displayError', 'displayError', (['e'], {}), '(e)\n', (1142, 1145), False, 'from Utils import displayError, displayInfo\n'), ((1520, 1535), 'Utils.displayError', 'displayError', (['e'], {}), '(e)\n', (1532, 1535), False, 'from Utils import displayError, displayInfo\n'), ((2068, 2083), 'Utils.displayError', 'displayError', (['e'], {}), '(e)\n', (2080, 2083), False, 'from Utils import displayError, displayInfo\n'), ((1223, 1251), 'llama_index.readers.web.MainContentExtractorReader', 'MainContentExtractorReader', ([], {}), '()\n', (1249, 1251), False, 'from llama_index.readers.web import MainContentExtractorReader\n'), ((1408, 1414), 'time.time', 'time', ([], {}), '()\n', (1412, 1414), False, 'from time import time\n'), ((1680, 1723), 'llama_index.core.SimpleDirectoryReader', 'SimpleDirectoryReader', (['path'], {'recursive': '(True)'}), '(path, 
recursive=True)\n', (1701, 1723), False, 'from llama_index.core import VectorStoreIndex, StorageContext, load_index_from_storage, SimpleDirectoryReader, Settings, get_response_synthesizer\n'), ((1761, 1800), 'llama_index.core.SimpleDirectoryReader', 'SimpleDirectoryReader', ([], {'input_files': 'path'}), '(input_files=path)\n', (1782, 1800), False, 'from llama_index.core import VectorStoreIndex, StorageContext, load_index_from_storage, SimpleDirectoryReader, Settings, get_response_synthesizer\n'), ((1956, 1962), 'time.time', 'time', ([], {}), '()\n', (1960, 1962), False, 'from time import time\n')] |
import logging
import os
import sys
from pathlib import Path
from dotenv import load_dotenv
from langchain.llms.octoai_endpoint import OctoAIEndpoint as OctoAiCloudLLM
from langchain.embeddings.octoai_embeddings import OctoAIEmbeddings
from llama_index import (
LLMPredictor,
ServiceContext,
download_loader,
GPTVectorStoreIndex,
LangchainEmbedding,
)
import time
from termios import tcflush, TCIFLUSH
# Get the current file's directory
current_dir = os.path.dirname(os.path.abspath(__file__))
# Change the current working directory
os.chdir(current_dir)
# Set logging level to CRITICAL
logging.basicConfig(level=logging.CRITICAL)
# Load environment variables
load_dotenv()
# Set the file storage directory
FILES = "./files"
def init():
"""
Initialize the files directory.
"""
if not os.path.exists(FILES):
os.mkdir(FILES)
def handle_exit():
"""
Handle exit gracefully.
"""
print("\nGoodbye!\n")
sys.exit(1)
def ask(file):
"""
Load the file, create the query engine and interactively answer user questions about the document.
"""
print("Loading...")
# Load the PDFReader
PDFReader = download_loader("PDFReader")
loader = PDFReader()
documents = loader.load_data(file=Path(file))
# Initialize the OctoAiCloudLLM
endpoint_url = os.getenv("ENDPOINT_URL")
# Set up the language model and predictor
llm = OctoAiCloudLLM(
endpoint_url=endpoint_url,
model_kwargs={
"model": "llama-2-7b-chat",
"messages": [
{
"role": "system",
"content": "Below is an instruction that describes a task. Write a response that appropriately completes the request."
}
],
"stream": False,
"max_tokens": 256
}
)
llm_predictor = LLMPredictor(llm=llm)
# Create the LangchainEmbedding
embeddings = LangchainEmbedding(
OctoAIEmbeddings(
endpoint_url="https://instructor-large-f1kzsig6xes9.octoai.run/predict"
)
)
# Create the ServiceContext
service_context = ServiceContext.from_defaults(
llm_predictor=llm_predictor, chunk_size_limit=512, embed_model=embeddings
)
# Create the index from documents
index = GPTVectorStoreIndex.from_documents(
documents, service_context=service_context
)
# Create the query engine
query_engine = index.as_query_engine(verbose=True, llm_predictor=llm_predictor)
# Clear the screen
os.system("clear")
print("Ready! Ask anything about the document")
print("")
print("Press Ctrl+C to exit")
try:
tcflush(sys.stdin, TCIFLUSH)
while True:
prompt = input("\nPrompt: ")
if prompt is None:
continue
if prompt == "exit":
handle_exit()
start_time = time.time()
response = query_engine.query(prompt)
end_time = time.time()
elapsed_time = end_time - start_time
print()
# Transform response to string and remove leading newline character if present
response = str(response).lstrip("\n")
print(f"Response({round(elapsed_time, 1)} sec): {response}")
except KeyboardInterrupt:
handle_exit()
def select_file():
"""
Select a file for processing.
"""
os.system("clear")
files = [file for file in os.listdir(FILES) if file.endswith(".pdf")]
if not files:
return "file.pdf" if os.path.exists("file.pdf") else None
print("Select a file")
for i, file in enumerate(files):
print(f"{i+1}. {file}")
print()
try:
possible_selections = list(range(len(files) + 1))
selection = int(input("Enter a number, or 0 to exit: "))
if selection == 0:
handle_exit()
elif selection not in possible_selections:
select_file()
else:
file_path = os.path.abspath(os.path.join(FILES, files[selection - 1]))
return file_path
except ValueError:
return select_file()
if __name__ == "__main__":
# Initialize the file directory
init()
if file := select_file():
# Start the interactive query session
ask(file)
else:
print("No files found")
handle_exit()
| [
"llama_index.ServiceContext.from_defaults",
"llama_index.GPTVectorStoreIndex.from_documents",
"llama_index.download_loader",
"llama_index.LLMPredictor"
] | [((557, 578), 'os.chdir', 'os.chdir', (['current_dir'], {}), '(current_dir)\n', (565, 578), False, 'import os\n'), ((611, 654), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.CRITICAL'}), '(level=logging.CRITICAL)\n', (630, 654), False, 'import logging\n'), ((685, 698), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (696, 698), False, 'from dotenv import load_dotenv\n'), ((491, 516), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (506, 516), False, 'import os\n'), ((970, 981), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (978, 981), False, 'import sys\n'), ((1183, 1211), 'llama_index.download_loader', 'download_loader', (['"""PDFReader"""'], {}), "('PDFReader')\n", (1198, 1211), False, 'from llama_index import LLMPredictor, ServiceContext, download_loader, GPTVectorStoreIndex, LangchainEmbedding\n'), ((1343, 1368), 'os.getenv', 'os.getenv', (['"""ENDPOINT_URL"""'], {}), "('ENDPOINT_URL')\n", (1352, 1368), False, 'import os\n'), ((1425, 1712), 'langchain.llms.octoai_endpoint.OctoAIEndpoint', 'OctoAiCloudLLM', ([], {'endpoint_url': 'endpoint_url', 'model_kwargs': "{'model': 'llama-2-7b-chat', 'messages': [{'role': 'system', 'content':\n 'Below is an instruction that describes a task. Write a response that appropriately completes the request.'\n }], 'stream': False, 'max_tokens': 256}"}), "(endpoint_url=endpoint_url, model_kwargs={'model':\n 'llama-2-7b-chat', 'messages': [{'role': 'system', 'content':\n 'Below is an instruction that describes a task. Write a response that appropriately completes the request.'\n }], 'stream': False, 'max_tokens': 256})\n", (1439, 1712), True, 'from langchain.llms.octoai_endpoint import OctoAIEndpoint as OctoAiCloudLLM\n'), ((1854, 1875), 'llama_index.LLMPredictor', 'LLMPredictor', ([], {'llm': 'llm'}), '(llm=llm)\n', (1866, 1875), False, 'from llama_index import LLMPredictor, ServiceContext, download_loader, GPTVectorStoreIndex, LangchainEmbedding\n'), ((2131, 2239), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'llm_predictor': 'llm_predictor', 'chunk_size_limit': '(512)', 'embed_model': 'embeddings'}), '(llm_predictor=llm_predictor, chunk_size_limit=\n 512, embed_model=embeddings)\n', (2159, 2239), False, 'from llama_index import LLMPredictor, ServiceContext, download_loader, GPTVectorStoreIndex, LangchainEmbedding\n'), ((2300, 2378), 'llama_index.GPTVectorStoreIndex.from_documents', 'GPTVectorStoreIndex.from_documents', (['documents'], {'service_context': 'service_context'}), '(documents, service_context=service_context)\n', (2334, 2378), False, 'from llama_index import LLMPredictor, ServiceContext, download_loader, GPTVectorStoreIndex, LangchainEmbedding\n'), ((2536, 2554), 'os.system', 'os.system', (['"""clear"""'], {}), "('clear')\n", (2545, 2554), False, 'import os\n'), ((3418, 3436), 'os.system', 'os.system', (['"""clear"""'], {}), "('clear')\n", (3427, 3436), False, 'import os\n'), ((828, 849), 'os.path.exists', 'os.path.exists', (['FILES'], {}), '(FILES)\n', (842, 849), False, 'import os\n'), ((859, 874), 'os.mkdir', 'os.mkdir', (['FILES'], {}), '(FILES)\n', (867, 874), False, 'import os\n'), ((1958, 2052), 'langchain.embeddings.octoai_embeddings.OctoAIEmbeddings', 'OctoAIEmbeddings', ([], {'endpoint_url': '"""https://instructor-large-f1kzsig6xes9.octoai.run/predict"""'}), "(endpoint_url=\n 'https://instructor-large-f1kzsig6xes9.octoai.run/predict')\n", (1974, 2052), False, 'from langchain.embeddings.octoai_embeddings import OctoAIEmbeddings\n'), 
((2674, 2702), 'termios.tcflush', 'tcflush', (['sys.stdin', 'TCIFLUSH'], {}), '(sys.stdin, TCIFLUSH)\n', (2681, 2702), False, 'from termios import tcflush, TCIFLUSH\n'), ((1275, 1285), 'pathlib.Path', 'Path', (['file'], {}), '(file)\n', (1279, 1285), False, 'from pathlib import Path\n'), ((2909, 2920), 'time.time', 'time.time', ([], {}), '()\n', (2918, 2920), False, 'import time\n'), ((2994, 3005), 'time.time', 'time.time', ([], {}), '()\n', (3003, 3005), False, 'import time\n'), ((3467, 3484), 'os.listdir', 'os.listdir', (['FILES'], {}), '(FILES)\n', (3477, 3484), False, 'import os\n'), ((3559, 3585), 'os.path.exists', 'os.path.exists', (['"""file.pdf"""'], {}), "('file.pdf')\n", (3573, 3585), False, 'import os\n'), ((4023, 4064), 'os.path.join', 'os.path.join', (['FILES', 'files[selection - 1]'], {}), '(FILES, files[selection - 1])\n', (4035, 4064), False, 'import os\n')] |
from typing import List
from llama_index.readers.base import BaseReader
from llama_index.readers.youtube_transcript import YoutubeTranscriptReader
from llama_index.schema import Document
class LyzrYoutubeReader(BaseReader):
def __init__(self) -> None:
try:
from youtube_transcript_api import YouTubeTranscriptApi
except ImportError:
raise ImportError(
"`youtube_transcript_api` package not found, \
please run `pip install youtube-transcript-api`"
)
def load_data(self, urls: List[str]) -> List[Document]:
loader = YoutubeTranscriptReader()
documents = loader.load_data(ytlinks=urls)
return documents
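# Minimal usage sketch (added for illustration; not part of the original module).
# The URL is only a placeholder and `youtube-transcript-api` must be installed.
if __name__ == "__main__":
    reader = LyzrYoutubeReader()
    docs = reader.load_data(["https://www.youtube.com/watch?v=VIDEO_ID"])  # replace VIDEO_ID with a real video id
    for doc in docs:
        print(doc.text[:200])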
| [
"llama_index.readers.youtube_transcript.YoutubeTranscriptReader"
] | [((623, 648), 'llama_index.readers.youtube_transcript.YoutubeTranscriptReader', 'YoutubeTranscriptReader', ([], {}), '()\n', (646, 648), False, 'from llama_index.readers.youtube_transcript import YoutubeTranscriptReader\n')] |
# Databricks notebook source
# MAGIC %md
# MAGIC # Understanding Embeddings
# MAGIC Embeddings are just vectors and we can visualise and analyse them as such \
# MAGIC A common way to look at and explore embeddings is to use TSNE visualisations. \
# MAGIC This can be applied to our VectorDB Data too.
# MAGIC
# MAGIC See: https://www.kaggle.com/code/colinmorris/visualizing-embeddings-with-t-sne
# MAGIC
# MAGIC An open source tool that you might want to investigate for this as well is Arize Phoenix \
# MAGIC See: https://docs.arize.com/phoenix/
# COMMAND ----------
# MAGIC # "arize-phoenix[experimental]" pandas==1.5.3
# MAGIC %pip install -U llama_index==0.8.54 faiss-cpu datashader bokeh holoviews scikit-image colorcet "arize-phoenix[experimental]"
# COMMAND ----------
dbutils.library.restartPython()
# COMMAND ----------
# MAGIC %md
# MAGIC # Setup configs
# COMMAND ----------
# MAGIC %run ./utils
# COMMAND ----------
import os
import numpy as np
# COMMAND ----------
# DBTITLE 1,Configurations
# test_pdf = f'{dbfs_source_docs}/2010.11934.pdf'
test_pdf = '/dbfs/bootcamp_data/pdf_data/2302.09419.pdf'
test_pdf
# COMMAND ----------
# MAGIC %md
# MAGIC
# MAGIC # Understanding Embeddings
# MAGIC
# MAGIC Let's explore how data embeds a bit more in order to see how we can improve retrieval \
# MAGIC We will use a model deployed on Databricks Model Serving
# COMMAND ----------
# DBTITLE 1,Setup some embedding algorithms
browser_host = dbutils.notebook.entry_point.getDbutils().notebook().getContext().browserHostName().get()
db_host = f"https://{browser_host}"
db_token = dbutils.notebook.entry_point.getDbutils().notebook().getContext().apiToken().get()
serving_uri = 'vicuna_13b'
serving_model_uri = f"{db_host}/serving-endpoints/{serving_uri}/invocations"
embedding_uri = 'brian_embedding_endpoint'
embedding_model_uri = f"{db_host}/serving-endpoints/{embedding_uri}/invocations"
llm_model = ServingEndpointLLM(endpoint_url=serving_model_uri, token=db_token)
embeddings = ModelServingEndpointEmbeddings(db_api_token=db_token)
# COMMAND ----------
# MAGIC %md
# MAGIC ## Simple Exploration w ReRank
# COMMAND ----------
# most vector stores use cosine_similarity
import faiss
example_sentences = ["The kangaroo population in Australia is declining due to habitat loss and hunting.",
"Australia has a diverse population of people from all over the world.",
"The kangaroo is a symbol of Australia and appears on its coat of arms.",
"The population of Australia is projected to reach 50 million by 2050.",
"Kangaroos are unique to Australia and can only be found in the wild there.",
"The indigenous population of Australia has been marginalized for centuries.",
"Australia is home to a variety of fascinating animals, including the kangaroo.",
"The population density of Australia is relatively low compared to other countries.",
"Kangaroos play an important role in maintaining the ecosystem balance in Australia.",
"Australia has strict laws regulating the hunting and trade of kangaroos to protect their population."]
encoded_sentences = [embeddings.embed_query(sentence) for sentence in example_sentences]
vector_format_encode = np.array(encoded_sentences, dtype=np.float32)
vector_format_encode /= np.linalg.norm(vector_format_encode, axis=1)[:, np.newaxis]
# we will create a vector index
vector_index = faiss.IndexFlatIP(vector_format_encode.shape[1])
vector_index.add(vector_format_encode)
test_question = "What is affecting the population of kangaroos?"
embedded_query = np.array(embeddings.embed_query(test_question))
# COMMAND ----------
# we can look at the retrieved entries and how it has been processed
k = 4
scores, index = vector_index.search(np.array([embedded_query]), k)
# look up the index for sentences
top_sentences = [example_sentences[i] for i in index[0]]
human_readable_result = list(zip(scores.reshape(-1, 1), top_sentences))
for score, sentence in human_readable_result:
print(f"Score: {score[0]:.4f}, Sentence: {sentence}")
# COMMAND ----------
# we can use a rerank to try to improve the result
format_top = []
for i in range(len(top_sentences)):
format_top.append(
f"Document {1}:\n"
f"{top_sentences[i]}"
)
context_str = "\n\n".join(format_top)
## Our Reranking prompt
rerank_prompt = ("A list of documents is shown below. Each document has a number next to it along "
"with a summary of the document. A question is also provided. \n"
"Respond with the numbers of the documents "
"you should consult to answer the question, in order of relevance, as well \n"
"as the relevance score. The relevance score is a number from 1-10 based on "
"how relevant you think the document is to the question.\n"
"Do not include any documents that are not relevant to the question. \n"
"Example format: \n"
"Document 1:\n<summary of document 1>\n\n"
"Document 2:\n<summary of document 2>\n\n"
"...\n\n"
"Document 10:\n<summary of document 10>\n\n"
"Question: <question>\n"
"Answer:\n"
"Doc: 9, Relevance: 7\n"
"Doc: 3, Relevance: 4\n"
"Doc: 7, Relevance: 3\n\n"
"Let's try this now: \n\n"
f"{context_str}\n"
f"Question: {test_question}\n"
"Answer:\n")
reranked_result = llm_model(rerank_prompt)
print(reranked_result)
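# COMMAND ----------
# Added sketch (not part of the original notebook): the rerank prompt asks the model to
# reply with lines such as "Doc: 9, Relevance: 7". A small regex maps those lines back onto
# `top_sentences`; malformed model output simply yields an empty list.
import re
def parse_rerank_output(raw: str, num_docs: int):
    ranked = []
    for doc_num, relevance in re.findall(r"Doc:\s*(\d+),\s*Relevance:\s*(\d+)", str(raw)):
        doc_idx = int(doc_num) - 1
        if 0 <= doc_idx < num_docs:
            ranked.append((doc_idx, int(relevance)))
    return sorted(ranked, key=lambda pair: pair[1], reverse=True)
for doc_idx, relevance in parse_rerank_output(reranked_result, len(top_sentences)):
    print(f"Relevance {relevance}: {top_sentences[doc_idx]}")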
# COMMAND ----------
# MAGIC %md
# MAGIC ### Visualising Embeddings
# COMMAND ----------
# So we can use reranking in order to better craft our results.
# Can we also look at our embeddings to understand the content?
# We will use umap and bokeh for this
import pandas as pd
import umap
from umap import plot
import plotly.express as px
from bokeh.resources import CDN
from bokeh.embed import file_html
umap_2d = umap.UMAP(n_components=2, init='random', random_state=0)
#umap_3d = umap.UMAP(n_components=3, init='random', random_state=0)
proj_2d = umap_2d.fit(vector_format_encode)
hover_data = pd.DataFrame({'index': np.arange(len(example_sentences)) ,
'text': example_sentences})
# COMMAND ----------
plot.output_notebook()
# COMMAND ----------
# MAGIC %md
# MAGIC We can now visualise the data, note that we don't have a lot of datapoints \
# MAGIC so there aren't any obvious patterns in these but as you add more points patterns should appear
# COMMAND ----------
# hover_data=hover_data,
p = plot.interactive(proj_2d, point_size=10)
html = file_html(p, CDN, "Sample Sentences")
displayHTML(html)
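# COMMAND ----------
# Added sketch (not part of the original notebook): the intro mentioned t-SNE as a common way
# to visualise embeddings. scikit-learn's TSNE gives a quick 2-D projection of the same ten
# sentence vectors; perplexity must stay below the number of points.
from sklearn.manifold import TSNE
tsne_proj = TSNE(n_components=2, perplexity=5, init="random", random_state=0).fit_transform(vector_format_encode)
tsne_df = pd.DataFrame({"x": tsne_proj[:, 0], "y": tsne_proj[:, 1], "text": example_sentences})
display(tsne_df)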
# COMMAND ----------
# MAGIC %md
# MAGIC # Embeddings with Whole Document
# MAGIC
# COMMAND ----------
# MAGIC %md ## Setup Service Context
# MAGIC By default, llama_index assumes that OpenAI provides both the LLM and the embeddings \
# MAGIC We are using Databricks Model Serving endpoints instead, so the setup is a little different \
# MAGIC Two endpoints are involved: one serving the embedding model and one serving the chat model \
# MAGIC The embedding endpoint produces the vectors for our vector store \
# MAGIC whilst the served LLM provides the brains
# COMMAND ----------
from llama_index import (
ServiceContext,
set_global_service_context,
LLMPredictor
)
from llama_index.embeddings import LangchainEmbedding
from llama_index.callbacks import CallbackManager, OpenInferenceCallbackHandler, LlamaDebugHandler
callback_handler = OpenInferenceCallbackHandler()
callback_manager = CallbackManager([callback_handler])
llm_predictor = LLMPredictor(llm=llm_model)
service_context = ServiceContext.from_defaults(llm_predictor=llm_predictor,
embed_model=embeddings,
callback_manager = callback_manager
)
# we can now set this context to be a global default
set_global_service_context(service_context)
# COMMAND ----------
# MAGIC %md
# MAGIC ## Load and Chunk Document
# MAGIC We will load a sample doc to test on, firstly with a naive default chunking strategy
# MAGIC
# COMMAND ----------
# DBTITLE 1,Create Index
# chunk the output
from llama_index import (
download_loader, VectorStoreIndex
)
from llama_index.evaluation import DatasetGenerator
from pathlib import Path
PDFReader = download_loader('PDFReader')
loader = PDFReader()
# This produces a list of llama_index document objects
documents = loader.load_data(file=Path(test_pdf))
# we are just setting up a simple in memory Vectorstore here
index = VectorStoreIndex.from_documents(documents)
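# COMMAND ----------
# Added sketch (not part of the original notebook): the index above relies on llama_index's
# implicit default chunking. Assuming the 0.8.x API pinned by the %pip cell, an explicit node
# parser makes the chunk size and overlap visible so different strategies can be compared.
from llama_index.node_parser import SimpleNodeParser
node_parser = SimpleNodeParser.from_defaults(chunk_size=512, chunk_overlap=64)
explicit_nodes = node_parser.get_nodes_from_documents(documents)
print(f"{len(documents)} document pages -> {len(explicit_nodes)} chunks")
# An index could be built from these nodes instead, e.g. VectorStoreIndex(nodes=explicit_nodes)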
# COMMAND ----------
# Let's have a quick look at the embeddings
text_obj = [document.text for document in documents]
encoded_chunks = [embeddings.embed_query(document_text) for document_text in text_obj]
vector_chunks = np.array(encoded_chunks, dtype=np.float32)
vector_chunks /= np.linalg.norm(vector_chunks, axis=1)[:, np.newaxis]
# COMMAND ----------
# DBTITLE 1,Examine Chunk text
pd.set_option('display.max_colwidth', 1000)
hover_data
# COMMAND ----------
# DBTITLE 1,Visualise Chunk Text
umap_2d = umap.UMAP(n_components=2, init='random', random_state=0)
#umap_3d = umap.UMAP(n_components=3, init='random', random_state=0)
proj_2d = umap_2d.fit(vector_chunks)
hover_data = pd.DataFrame({'index': np.arange(len(text_obj)) ,
'text': text_obj})
# hover_data=hover_data,
p = plot.interactive(proj_2d, point_size=10)
html = file_html(p, CDN, "Research Doc")
displayHTML(html)
# COMMAND ----------
# MAGIC %md TODO BEIR comparison of embedding algorithms
# COMMAND ----------
# DBTITLE 1,Create Sample Questions
import nest_asyncio
nest_asyncio.apply()
# and turning it into a query engine
query_engine = index.as_query_engine()
# this is the question generator. Note that it has additional settings to customise prompt etc
data_generator = DatasetGenerator.from_documents(documents=documents,
service_context=service_context)
# this is the call to generate the questions
eval_questions = data_generator.generate_questions_from_nodes()
# COMMAND ----------
# MAGIC %md
# MAGIC # (WIP) Create Phoenix Visualisations
# MAGIC TODO We are working with the Arize team to make Phoenix work \
# MAGIC till that happens this code will not be of use for now
# COMMAND ----------
# Extract out nodes
# test parse index data
document_ids = []
document_texts = []
document_embeddings = []
docstore = index.storage_context.docstore
for node_id, node in docstore.docs.items():
document_ids.append(node.hash) # use node hash as the document ID
document_texts.append(node.text)
document_embeddings.append(np.array(index.storage_context.vector_store.get(node_id)))
dataset_df = pd.DataFrame(
{
"document_id": document_ids,
"text": document_texts,
"text_vector": document_embeddings,
}
)
# COMMAND ----------
# create the query frame
from llama_index.callbacks.open_inference_callback import as_dataframe
callback_handler = OpenInferenceCallbackHandler()
query_data_buffer = callback_handler.flush_query_data_buffer()
sample_query_df = as_dataframe(query_data_buffer)
sample_query_df
# COMMAND ----------
import phoenix as px
### Create the schema for the documents
database_schema = px.Schema(
prediction_id_column_name="document_id",
prompt_column_names=px.EmbeddingColumnNames(
vector_column_name="text_vector",
raw_data_column_name="text",
),
)
database_ds = px.Dataset(
dataframe=dataset_df,
schema=database_schema,
name="database",
)
query_ds = px.Dataset.from_open_inference(sample_query_df)
# COMMAND ----------
# MAGIC %md
# MAGIC # Start Visualisation App
# COMMAND ----------
session = px.launch_app(primary=query_ds, corpus=database_ds, host='0.0.0.0', port='10101')
# COMMAND ---------- | [
"llama_index.VectorStoreIndex.from_documents",
"llama_index.evaluation.DatasetGenerator.from_documents",
"llama_index.download_loader",
"llama_index.LLMPredictor",
"llama_index.ServiceContext.from_defaults",
"llama_index.callbacks.OpenInferenceCallbackHandler",
"llama_index.set_global_service_context",
"llama_index.callbacks.CallbackManager",
"llama_index.callbacks.open_inference_callback.as_dataframe"
] | [((3171, 3216), 'numpy.array', 'np.array', (['encoded_sentences'], {'dtype': 'np.float32'}), '(encoded_sentences, dtype=np.float32)\n', (3179, 3216), True, 'import numpy as np\n'), ((3349, 3397), 'faiss.IndexFlatIP', 'faiss.IndexFlatIP', (['vector_format_encode.shape[1]'], {}), '(vector_format_encode.shape[1])\n', (3366, 3397), False, 'import faiss\n'), ((5704, 5760), 'umap.UMAP', 'umap.UMAP', ([], {'n_components': '(2)', 'init': '"""random"""', 'random_state': '(0)'}), "(n_components=2, init='random', random_state=0)\n", (5713, 5760), False, 'import umap\n'), ((6025, 6047), 'umap.plot.output_notebook', 'plot.output_notebook', ([], {}), '()\n', (6045, 6047), False, 'from umap import plot\n'), ((6322, 6362), 'umap.plot.interactive', 'plot.interactive', (['proj_2d'], {'point_size': '(10)'}), '(proj_2d, point_size=10)\n', (6338, 6362), False, 'from umap import plot\n'), ((6371, 6408), 'bokeh.embed.file_html', 'file_html', (['p', 'CDN', '"""Sample Sentences"""'], {}), "(p, CDN, 'Sample Sentences')\n", (6380, 6408), False, 'from bokeh.embed import file_html\n'), ((7311, 7341), 'llama_index.callbacks.OpenInferenceCallbackHandler', 'OpenInferenceCallbackHandler', ([], {}), '()\n', (7339, 7341), False, 'from llama_index.callbacks import CallbackManager, OpenInferenceCallbackHandler, LlamaDebugHandler\n'), ((7361, 7396), 'llama_index.callbacks.CallbackManager', 'CallbackManager', (['[callback_handler]'], {}), '([callback_handler])\n', (7376, 7396), False, 'from llama_index.callbacks import CallbackManager, OpenInferenceCallbackHandler, LlamaDebugHandler\n'), ((7414, 7441), 'llama_index.LLMPredictor', 'LLMPredictor', ([], {'llm': 'llm_model'}), '(llm=llm_model)\n', (7426, 7441), False, 'from llama_index import ServiceContext, set_global_service_context, LLMPredictor\n'), ((7460, 7581), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'llm_predictor': 'llm_predictor', 'embed_model': 'embeddings', 'callback_manager': 'callback_manager'}), '(llm_predictor=llm_predictor, embed_model=\n embeddings, callback_manager=callback_manager)\n', (7488, 7581), False, 'from llama_index import ServiceContext, set_global_service_context, LLMPredictor\n'), ((7777, 7820), 'llama_index.set_global_service_context', 'set_global_service_context', (['service_context'], {}), '(service_context)\n', (7803, 7820), False, 'from llama_index import ServiceContext, set_global_service_context, LLMPredictor\n'), ((8215, 8243), 'llama_index.download_loader', 'download_loader', (['"""PDFReader"""'], {}), "('PDFReader')\n", (8230, 8243), False, 'from llama_index import download_loader, VectorStoreIndex\n'), ((8441, 8483), 'llama_index.VectorStoreIndex.from_documents', 'VectorStoreIndex.from_documents', (['documents'], {}), '(documents)\n', (8472, 8483), False, 'from llama_index import download_loader, VectorStoreIndex\n'), ((8707, 8749), 'numpy.array', 'np.array', (['encoded_chunks'], {'dtype': 'np.float32'}), '(encoded_chunks, dtype=np.float32)\n', (8715, 8749), True, 'import numpy as np\n'), ((8874, 8917), 'pandas.set_option', 'pd.set_option', (['"""display.max_colwidth"""', '(1000)'], {}), "('display.max_colwidth', 1000)\n", (8887, 8917), True, 'import pandas as pd\n'), ((8995, 9051), 'umap.UMAP', 'umap.UMAP', ([], {'n_components': '(2)', 'init': '"""random"""', 'random_state': '(0)'}), "(n_components=2, init='random', random_state=0)\n", (9004, 9051), False, 'import umap\n'), ((9298, 9338), 'umap.plot.interactive', 'plot.interactive', (['proj_2d'], {'point_size': '(10)'}), '(proj_2d, 
point_size=10)\n', (9314, 9338), False, 'from umap import plot\n'), ((9347, 9380), 'bokeh.embed.file_html', 'file_html', (['p', 'CDN', '"""Research Doc"""'], {}), "(p, CDN, 'Research Doc')\n", (9356, 9380), False, 'from bokeh.embed import file_html\n'), ((9559, 9579), 'nest_asyncio.apply', 'nest_asyncio.apply', ([], {}), '()\n', (9577, 9579), False, 'import nest_asyncio\n'), ((9770, 9860), 'llama_index.evaluation.DatasetGenerator.from_documents', 'DatasetGenerator.from_documents', ([], {'documents': 'documents', 'service_context': 'service_context'}), '(documents=documents, service_context=\n service_context)\n', (9801, 9860), False, 'from llama_index.evaluation import DatasetGenerator\n'), ((10653, 10760), 'pandas.DataFrame', 'pd.DataFrame', (["{'document_id': document_ids, 'text': document_texts, 'text_vector':\n document_embeddings}"], {}), "({'document_id': document_ids, 'text': document_texts,\n 'text_vector': document_embeddings})\n", (10665, 10760), True, 'import pandas as pd\n'), ((10957, 10987), 'llama_index.callbacks.OpenInferenceCallbackHandler', 'OpenInferenceCallbackHandler', ([], {}), '()\n', (10985, 10987), False, 'from llama_index.callbacks import CallbackManager, OpenInferenceCallbackHandler, LlamaDebugHandler\n'), ((11069, 11100), 'llama_index.callbacks.open_inference_callback.as_dataframe', 'as_dataframe', (['query_data_buffer'], {}), '(query_data_buffer)\n', (11081, 11100), False, 'from llama_index.callbacks.open_inference_callback import as_dataframe\n'), ((11427, 11500), 'phoenix.Dataset', 'px.Dataset', ([], {'dataframe': 'dataset_df', 'schema': 'database_schema', 'name': '"""database"""'}), "(dataframe=dataset_df, schema=database_schema, name='database')\n", (11437, 11500), True, 'import phoenix as px\n'), ((11528, 11575), 'phoenix.Dataset.from_open_inference', 'px.Dataset.from_open_inference', (['sample_query_df'], {}), '(sample_query_df)\n', (11558, 11575), True, 'import phoenix as px\n'), ((11678, 11764), 'phoenix.launch_app', 'px.launch_app', ([], {'primary': 'query_ds', 'corpus': 'database_ds', 'host': '"""0.0.0.0"""', 'port': '"""10101"""'}), "(primary=query_ds, corpus=database_ds, host='0.0.0.0', port=\n '10101')\n", (11691, 11764), True, 'import phoenix as px\n'), ((3241, 3285), 'numpy.linalg.norm', 'np.linalg.norm', (['vector_format_encode'], {'axis': '(1)'}), '(vector_format_encode, axis=1)\n', (3255, 3285), True, 'import numpy as np\n'), ((3702, 3728), 'numpy.array', 'np.array', (['[embedded_query]'], {}), '([embedded_query])\n', (3710, 3728), True, 'import numpy as np\n'), ((8767, 8804), 'numpy.linalg.norm', 'np.linalg.norm', (['vector_chunks'], {'axis': '(1)'}), '(vector_chunks, axis=1)\n', (8781, 8804), True, 'import numpy as np\n'), ((8355, 8369), 'pathlib.Path', 'Path', (['test_pdf'], {}), '(test_pdf)\n', (8359, 8369), False, 'from pathlib import Path\n'), ((11300, 11390), 'phoenix.EmbeddingColumnNames', 'px.EmbeddingColumnNames', ([], {'vector_column_name': '"""text_vector"""', 'raw_data_column_name': '"""text"""'}), "(vector_column_name='text_vector',\n raw_data_column_name='text')\n", (11323, 11390), True, 'import phoenix as px\n')] |
import asyncio
import io
import json
import logging
import os
import tempfile
import time
from logging import getLogger
from fastapi import APIRouter, Request, status
from fastapi.encoders import jsonable_encoder
from typing import List, Tuple
# This is here to satisfy runtime import needs
# that pyinstaller appears to miss
from llama_index.schema import BaseNode, TextNode, NodeRelationship, RelatedNodeInfo
from llama_index import Document, VectorStoreIndex
from llama_index import Document
from snowflake import SnowflakeGenerator
from service.dependencies import (
TANA_NODE,
TANA_TEXT,
ChromaRequest,
LlamaRequest,
TanaNodeMetadata,
capture_logs,
)
from service.endpoints.chroma import chroma_upsert
from service.endpoints.topics import TanaDocument, extract_topic_from_context, extract_topics
from service.llamaindex import create_index, get_index
from service.tana_types import TanaDump
from service.txntimer import txntimer
logger = getLogger()
snowflakes = SnowflakeGenerator(42)
router = APIRouter()
minutes = 1000 * 60
# TODO: Add header support throughout so we can pass Tana API key and OpenAPI Key as headers
# NOTE: we already have this in the main.py middleware wrapper, but it would be better
# to do it here for OpenAPI spec purposes.
# x_tana_api_token: Annotated[str | None, Header()] = None
# x_openai_api_key: Annotated[str | None, Header()] = None
# TODO: change this to remove LLamaindex and simply go directly to ChromaDB
async def load_chromadb_from_topics(topics:List[TanaDocument], model:str, observe=False):
'''Load the topic index from the topic array directly.'''
logger.info('Building ChromaDB vectors from nodes')
index_nodes = []
# loop through all the topics and create a Document for each
for topic in topics:
(doc_node, text_nodes) = document_from_topic(topic)
index_nodes.append(doc_node)
index_nodes.extend(text_nodes)
logger.info(f'Gathered {len(index_nodes)} tana nodes')
logger.info("Preparing storage context")
for node in index_nodes:
logger.info(f'Node {node.node_id} {node.metadata}')
chroma_req = ChromaRequest(context=node.text, nodeId=node.node_id, model=model)
upsert = await chroma_upsert(chroma_req)
logger.info("ChromaDB populated and ready")
return index_nodes
def load_index_from_topics(topics:List[TanaDocument], model:str, observe=False):
'''Load the topic index from the topic array directly.'''
logger.info('Building llama_index nodes')
index_nodes = []
# loop through all the topics and create a Document for each
for topic in topics:
(doc_node, text_nodes) = document_from_topic(topic)
index_nodes.append(doc_node)
index_nodes.extend(text_nodes)
logger.info(f'Gathered {len(index_nodes)} tana nodes')
logger.info("Preparing storage context")
index = create_index(model, observe, index_nodes)
logger.info("Llamaindex populated and ready")
return index
def document_from_topic(topic) -> Tuple[Document, List[TextNode]]:
'''Load a single topic into the index_nodes list.'''
text_nodes = []
metadata = {
'category': TANA_NODE,
'supertag': ' '.join(['#' + tag for tag in topic.tags]),
'title': topic.name,
'tana_id': topic.id,
'document_id': topic.id,
}
if topic.fields:
    # get all the fields as metadata as well
fields = set([field.name for field in topic.fields])
for field_name in fields:
metadata[field_name] = ' '.join([field.value for field in topic.fields if field.name == field_name])
# what other props do we need to create?
# document = Document(id_=topic.id, text=topic.name) # type: ignore
  # we only add the first line and fields to the document payload
# anything else and we blow out the token limits (and cost a lot!)
text = topic.content[0][2]
document_node = Document(doc_id=topic.id, text=text) # first line only
document_node.metadata = metadata
# # make a note of the document in our nodes list
# index_nodes.append(document_node)
# now iterate all the remaining topic.content and create a node for each
# each of these is simply a string, being the name of a tana child node
# but with [[]name^id]] reference syntax used for references
# TODO: make these tana_nodes richer structurally
# TODO: use actual tana node id here perhaps?
previous_text_node = None
if len(topic.content) > 30:
logger.warning(f'Large topic {topic.id} with {len(topic.content)} children')
# process all the child content records...
for (content_id, is_ref, tana_element) in topic.content[1:]:
content_metadata = TanaNodeMetadata(
category=TANA_TEXT,
title=topic.name,
topic_id=topic.id,
# TODO: ? 'supertag': ' '.join(['#' + tag for tag in topic.tags]),
# text gets added below...
)
# wire up the tana_node as an index_node with the text as the payload
if is_ref:
current_text_node = TextNode(text=tana_element)
current_text_node.metadata['tana_ref_id'] = content_id
else:
current_text_node = TextNode(id_=content_id, text=tana_element)
current_text_node.metadata['tana_id'] = content_id
current_text_node.metadata = content_metadata.model_dump()
# check if this is a reference node and add additional metadata
# TODO: backport this to chroma upsert...?
current_text_node.relationships[NodeRelationship.SOURCE] = RelatedNodeInfo(node_id=document_node.node_id)
# wire up next/previous
if previous_text_node:
current_text_node.relationships[NodeRelationship.PREVIOUS] = RelatedNodeInfo(node_id=previous_text_node.node_id)
previous_text_node.relationships[NodeRelationship.NEXT] = RelatedNodeInfo(node_id=current_text_node.node_id)
text_nodes.append(current_text_node)
previous_text_node = current_text_node
return (document_node, text_nodes)
# attempt to parallelize non-async code
# see https://github.com/tiangolo/fastapi/discussions/6347
lock = asyncio.Lock()
# Note: accepts ?model= query param
@router.post("/llamaindex/upsert", status_code=status.HTTP_204_NO_CONTENT, tags=["preload"])
async def llama_upsert(request: Request, req:LlamaRequest, model:str="openai", observe:bool=False):
'''Upserts a single Tana node context into the Llama index.
'''
async with lock:
tana_id = req.nodeId
tana_context = req.context
(index, _, _, _) = get_index(model, observe)
result = extract_topic_from_context(tana_id, tana_context)
(document, text_nodes) = document_from_topic(result)
# remove then add back
# index.delete_nodes([document.node_id] + [node.node_id for node in text_nodes])
# strip '0' ids from node list
id_list = [document.node_id] + [node.node_id for node in text_nodes if node.node_id != '0']
for node_id in id_list:
index.delete_ref_doc(node_id)
# filter out the '0' id nodes
text_nodes = [node for node in text_nodes if node.node_id != '0']
node_list = [document] + text_nodes
index.insert_nodes(node_list)
    #TODO: consider returning some kind of completion confirmation payload with statistics
return None
# Note: accepts ?model= query param
@router.post("/llamaindex/preload", tags=["preload"])
async def llama_preload(request: Request, tana_dump:TanaDump, model:str="openai"):
'''Accepts a Tana dump JSON payload and builds the index from it.
Uses the topic extraction code from the topics endpoint to build
  an object tree in memory, then loads that into ChromaDB via LlamaIndex.
Returns a list of log messages from the process.
'''
async with lock:
messages = []
async with capture_logs(logger) as logs:
result = await extract_topics(tana_dump, 'JSON') # type: ignore
logger.info('Extracted topics from Tana dump')
# save output to a temporary file
with tempfile.TemporaryDirectory() as tmp:
path = os.path.join(tmp, 'topics.json')
logger.info(f'Saving topics to {path}')
# use path
with open(path, "w") as f:
json_result = jsonable_encoder(result)
f.write(json.dumps(json_result))
logger.info('Loading index ...')
# don't use file anymore...
# load_index_from_file(path)
# load directly from in-memory representation
await load_chromadb_from_topics(result, model=model)
# load_index_from_topics(result, model=model)
# logger.info(f'Deleted temp file {path}')
messages = logs.getvalue()
return messages
| [
"llama_index.schema.TextNode",
"llama_index.schema.RelatedNodeInfo",
"llama_index.Document"
] | [((975, 986), 'logging.getLogger', 'getLogger', ([], {}), '()\n', (984, 986), False, 'from logging import getLogger\n'), ((1001, 1023), 'snowflake.SnowflakeGenerator', 'SnowflakeGenerator', (['(42)'], {}), '(42)\n', (1019, 1023), False, 'from snowflake import SnowflakeGenerator\n'), ((1034, 1045), 'fastapi.APIRouter', 'APIRouter', ([], {}), '()\n', (1043, 1045), False, 'from fastapi import APIRouter, Request, status\n'), ((5959, 5973), 'asyncio.Lock', 'asyncio.Lock', ([], {}), '()\n', (5971, 5973), False, 'import asyncio\n'), ((2838, 2879), 'service.llamaindex.create_index', 'create_index', (['model', 'observe', 'index_nodes'], {}), '(model, observe, index_nodes)\n', (2850, 2879), False, 'from service.llamaindex import create_index, get_index\n'), ((3831, 3867), 'llama_index.Document', 'Document', ([], {'doc_id': 'topic.id', 'text': 'text'}), '(doc_id=topic.id, text=text)\n', (3839, 3867), False, 'from llama_index import Document\n'), ((2125, 2191), 'service.dependencies.ChromaRequest', 'ChromaRequest', ([], {'context': 'node.text', 'nodeId': 'node.node_id', 'model': 'model'}), '(context=node.text, nodeId=node.node_id, model=model)\n', (2138, 2191), False, 'from service.dependencies import TANA_NODE, TANA_TEXT, ChromaRequest, LlamaRequest, TanaNodeMetadata, capture_logs\n'), ((4597, 4670), 'service.dependencies.TanaNodeMetadata', 'TanaNodeMetadata', ([], {'category': 'TANA_TEXT', 'title': 'topic.name', 'topic_id': 'topic.id'}), '(category=TANA_TEXT, title=topic.name, topic_id=topic.id)\n', (4613, 4670), False, 'from service.dependencies import TANA_NODE, TANA_TEXT, ChromaRequest, LlamaRequest, TanaNodeMetadata, capture_logs\n'), ((5392, 5438), 'llama_index.schema.RelatedNodeInfo', 'RelatedNodeInfo', ([], {'node_id': 'document_node.node_id'}), '(node_id=document_node.node_id)\n', (5407, 5438), False, 'from llama_index.schema import BaseNode, TextNode, NodeRelationship, RelatedNodeInfo\n'), ((6371, 6396), 'service.llamaindex.get_index', 'get_index', (['model', 'observe'], {}), '(model, observe)\n', (6380, 6396), False, 'from service.llamaindex import create_index, get_index\n'), ((6410, 6459), 'service.endpoints.topics.extract_topic_from_context', 'extract_topic_from_context', (['tana_id', 'tana_context'], {}), '(tana_id, tana_context)\n', (6436, 6459), False, 'from service.endpoints.topics import TanaDocument, extract_topic_from_context, extract_topics\n'), ((2211, 2236), 'service.endpoints.chroma.chroma_upsert', 'chroma_upsert', (['chroma_req'], {}), '(chroma_req)\n', (2224, 2236), False, 'from service.endpoints.chroma import chroma_upsert\n'), ((4922, 4949), 'llama_index.schema.TextNode', 'TextNode', ([], {'text': 'tana_element'}), '(text=tana_element)\n', (4930, 4949), False, 'from llama_index.schema import BaseNode, TextNode, NodeRelationship, RelatedNodeInfo\n'), ((5047, 5090), 'llama_index.schema.TextNode', 'TextNode', ([], {'id_': 'content_id', 'text': 'tana_element'}), '(id_=content_id, text=tana_element)\n', (5055, 5090), False, 'from llama_index.schema import BaseNode, TextNode, NodeRelationship, RelatedNodeInfo\n'), ((5561, 5612), 'llama_index.schema.RelatedNodeInfo', 'RelatedNodeInfo', ([], {'node_id': 'previous_text_node.node_id'}), '(node_id=previous_text_node.node_id)\n', (5576, 5612), False, 'from llama_index.schema import BaseNode, TextNode, NodeRelationship, RelatedNodeInfo\n'), ((5677, 5727), 'llama_index.schema.RelatedNodeInfo', 'RelatedNodeInfo', ([], {'node_id': 'current_text_node.node_id'}), '(node_id=current_text_node.node_id)\n', (5692, 5727), False, 'from 
llama_index.schema import BaseNode, TextNode, NodeRelationship, RelatedNodeInfo\n'), ((7617, 7637), 'service.dependencies.capture_logs', 'capture_logs', (['logger'], {}), '(logger)\n', (7629, 7637), False, 'from service.dependencies import TANA_NODE, TANA_TEXT, ChromaRequest, LlamaRequest, TanaNodeMetadata, capture_logs\n'), ((7668, 7701), 'service.endpoints.topics.extract_topics', 'extract_topics', (['tana_dump', '"""JSON"""'], {}), "(tana_dump, 'JSON')\n", (7682, 7701), False, 'from service.endpoints.topics import TanaDocument, extract_topic_from_context, extract_topics\n'), ((7822, 7851), 'tempfile.TemporaryDirectory', 'tempfile.TemporaryDirectory', ([], {}), '()\n', (7849, 7851), False, 'import tempfile\n'), ((7875, 7907), 'os.path.join', 'os.path.join', (['tmp', '"""topics.json"""'], {}), "(tmp, 'topics.json')\n", (7887, 7907), False, 'import os\n'), ((8034, 8058), 'fastapi.encoders.jsonable_encoder', 'jsonable_encoder', (['result'], {}), '(result)\n', (8050, 8058), False, 'from fastapi.encoders import jsonable_encoder\n'), ((8077, 8100), 'json.dumps', 'json.dumps', (['json_result'], {}), '(json_result)\n', (8087, 8100), False, 'import json\n')] |
import sys
import asyncio
import logging
import warnings
import nest_asyncio
from typing import List, Set
from bs4 import BeautifulSoup, Tag
from typing import List
from llama_index.schema import Document
IS_IPYKERNEL = "ipykernel_launcher" in sys.argv[0]
if IS_IPYKERNEL:
nest_asyncio.apply()
logger = logging.getLogger(__name__)
CONTENT_TAGS = [
"p",
"div",
"span",
"a",
"td",
"tr",
"li",
"article",
"section",
"pre",
"code",
"blockquote",
"em",
"strong",
"b",
"i",
"h1",
"h2",
"h3",
"h4",
"h5",
"h6",
"title",
]
def scrape(html: str) -> str:
soup: BeautifulSoup = BeautifulSoup(html, "html.parser")
content: List[Tag] = soup.find_all(CONTENT_TAGS)
    # collect unique text fragments; a set deduplicates but does not preserve document order
    text_set: Set[str] = set()
for p in content:
for text in p.stripped_strings:
text_set.add(text)
return " ".join(text_set)
async def async_load_content_using_playwright(url: str) -> str:
try:
from playwright.async_api import async_playwright
async with async_playwright() as p:
browser = await p.chromium.launch()
page = await browser.new_page()
await page.goto(url)
html = await page.content()
await browser.close()
return html
except ImportError:
raise ImportError(
"`playwright` package not found, please install it with "
"`pip install playwright && playwright install`"
)
def load_content_using_playwright(url: str) -> str:
return asyncio.get_event_loop().run_until_complete(
async_load_content_using_playwright(url)
)
class LyzrWebPageReader:
def __init__(self) -> None:
pass
@staticmethod
def load_data(url: str) -> List[Document]:
if IS_IPYKERNEL:
warning_msg = "Running in Google Colab or a Jupyter notebook. Consider using nest_asyncio.apply() to avoid event loop conflicts."
warnings.warn(warning_msg, RuntimeWarning)
html = load_content_using_playwright(url)
content = scrape(html)
document = Document(text=content, metadata={"url": url})
return [document]
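# Minimal usage sketch (added for illustration; not part of the original module).
# Requires the playwright browsers to be installed (`playwright install`); the URL is only an example.
if __name__ == "__main__":
    docs = LyzrWebPageReader.load_data("https://example.com")
    print(docs[0].metadata["url"])
    print(docs[0].text[:200])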
| [
"llama_index.schema.Document"
] | [((312, 339), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (329, 339), False, 'import logging\n'), ((281, 301), 'nest_asyncio.apply', 'nest_asyncio.apply', ([], {}), '()\n', (299, 301), False, 'import nest_asyncio\n'), ((676, 710), 'bs4.BeautifulSoup', 'BeautifulSoup', (['html', '"""html.parser"""'], {}), "(html, 'html.parser')\n", (689, 710), False, 'from bs4 import BeautifulSoup, Tag\n'), ((2182, 2227), 'llama_index.schema.Document', 'Document', ([], {'text': 'content', 'metadata': "{'url': url}"}), "(text=content, metadata={'url': url})\n", (2190, 2227), False, 'from llama_index.schema import Document\n'), ((1088, 1106), 'playwright.async_api.async_playwright', 'async_playwright', ([], {}), '()\n', (1104, 1106), False, 'from playwright.async_api import async_playwright\n'), ((1609, 1633), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (1631, 1633), False, 'import asyncio\n'), ((2030, 2072), 'warnings.warn', 'warnings.warn', (['warning_msg', 'RuntimeWarning'], {}), '(warning_msg, RuntimeWarning)\n', (2043, 2072), False, 'import warnings\n')] |
# coding=utf-8
import glob, pprint, traceback, pydoc, threading, asyncio
import os, re, webbrowser, platform, zipfile, subprocess, config
from prompt_toolkit.input import create_input
from prompt_toolkit.keys import Keys
from datetime import date
from db.StatisticsWordsSqlite import StatisticsWordsSqlite
from util.VlcUtil import VlcUtil
from util.exlbl import allLocations, tc_location_names, sc_location_names
from util.PluginEventHandler import PluginEventHandler
from util.WebtopUtil import WebtopUtil
from util.CatalogUtil import CatalogUtil
from util.GitHubRepoInfo import GitHubRepoInfo
from util.HtmlGeneratorUtil import HtmlGeneratorUtil
from util.TextUtil import TextUtil
from util.FileUtil import FileUtil
from util.LexicalData import LexicalData
from util.readings import allDays
from functools import partial
from util.BibleVerseParser import BibleVerseParser
from util.BibleBooks import BibleBooks
from db.AGBTSData import AGBTSData
from db.BiblesSqlite import BiblesSqlite, Bible, ClauseData
from db.ToolsSqlite import CrossReferenceSqlite, CollectionsSqlite, ImageSqlite, IndexesSqlite, EncyclopediaData, \
DictionaryData, ExlbData, SearchSqlite, Commentary, VerseData, WordData, BookData, \
Lexicon, LexiconData
from util.ThirdParty import ThirdPartyDictionary
from util.HebrewTransliteration import HebrewTransliteration
from db.NoteSqlite import NoteSqlite
from util.Translator import Translator
from db.Highlight import Highlight
from util.TtsLanguages import TtsLanguages
from db.BiblesSqlite import MorphologySqlite
from db.JournalSqlite import JournalSqlite
#from gui.Downloader import Downloader
from install.module import *
from util.DatafileLocation import DatafileLocation
if config.qtLibrary == "pyside6":
try:
#QtTextToSpeech is currently not in PySide6 pip3 package
#ModuleNotFoundError: No module named 'PySide6.QtTextToSpeech'
from PySide6.QtTextToSpeech import QTextToSpeech
except:
pass
else:
try:
# Note: qtpy.QtTextToSpeech is not found!
from PySide2.QtTextToSpeech import QTextToSpeech
except:
try:
from PyQt5.QtTextToSpeech import QTextToSpeech
except:
pass
class TextCommandParser:
last_lexicon_entry = ''
last_text_search = ''
def __init__(self, parent):
self.parent = parent
self.lastKeyword = None
self.cliTtsProcess = None
self.qtTtsEngine = None
self.llamaIndexUpdated = False
self.locationMap = {exlbl_entry: (name[0].upper(), name, float(latitude), float(longitude)) for exlbl_entry, name, latitude, longitude in allLocations}
self.interpreters = {
"bible": (self.textBible, """
# [KEYWORD] BIBLE
# Feature - Open a bible chapter or multiples verses on main or study view.
# Usage - BIBLE:::[BIBLE_VERSION]:::[BIBLE_REFERENCE(S)]
# Remarks:
# 1) The bible version last opened on main view is opened by default if "[BIBLE_VERSION]:::" or "BIBLE:::[BIBLE_VERSION]:::" is omitted.
# 2) If "BIBLE:::" command is called from manual entry via command field or a link within the content on main view, bible text is opened on main view.
# 3) If "BIBLE:::" command is called from a link within the content on study view and "Bible Display on Study View" is enabled, bible text is opened on study view.
# 4) If "BIBLE:::" command is called from a link within the content on study view and "Bible Display on Study View" is disabled, bible text is opened on main view.
# 5) Common abbreviations of bible references are supported.
# Examples:
# e.g. John 3:16
# e.g. Jn 3:16; Rm 5:8; Deu 6:4
# e.g. BIBLE:::John 3:16
# e.g. BIBLE:::Jn 3:16; Rm 5:8; Deu 6:4
# e.g. BIBLE:::KJV:::John 3:16
# e.g. BIBLE:::KJV:::Jn 3:16; Rm 5:8; Deu 6:4"""),
"main": (self.textMain, """
# [KEYWORD] MAIN
# Feature - Open a bible chapter or multiples verses on main view.
# Usage - MAIN:::[BIBLE_VERSION]:::[BIBLE_REFERENCE(S)]
# Remarks:
# 1) The bible version last opened on main view is opened by default if "[BIBLE_VERSION]:::" or "MAIN:::[BIBLE_VERSION]:::" is omitted.
# 2) Common abbreviations of bible references are supported.
# Examples:
# e.g. John 3:16
# e.g. Jn 3:16; Rm 5:8; Deu 6:4
# e.g. MAIN:::John 3:16
# e.g. MAIN:::Jn 3:16; Rm 5:8; Deu 6:4
# e.g. MAIN:::KJV:::John 3:16
# e.g. MAIN:::KJV:::Jn 3:16; Rm 5:8; Deu 6:4"""),
"study": (self.textStudy, """
# [KEYWORD] STUDY
# Feature - Open a bible chapter or multiples verses on study / main view.
# Usage - STUDY:::[BIBLE_VERSION]:::[BIBLE_REFERENCE(S)]
# Remarks:
# 1) The bible version last opened on study view is opened by default if "[BIBLE_VERSION]:::" is omitted.
# 2) If "Bible Display on Study View" is enabled, bible text is opened on study view.
# 3) If "Bible Display on Study View" is disabled, bible text is opened on main view.
# 4) Common abbreviations of bible references are supported.
# Examples:
# e.g. STUDY:::John 3:16
# e.g. STUDY:::Jn 3:16; Rm 5:8; Deu 6:4
# e.g. STUDY:::KJV:::John 3:16
# e.g. STUDY:::KJV:::Jn 3:16; Rm 5:8; Deu 6:4"""),
"text": (self.textText, """
# [KEYWORD] TEXT
# Feature - Change the bible version of the last opened passage on main view.
# Usage - TEXT:::[BIBLE_VERSION]
# e.g. TEXT:::KJV
# e.g. TEXT:::NET"""),
"studytext": (self.textStudyText, """
# [KEYWORD] STUDYTEXT
# Feature - Change the bible version of the last opened passage on study view.
# Usage - STUDYTEXT:::[BIBLE_VERSION]
# e.g. STUDYTEXT:::KJV
# e.g. STUDYTEXT:::NET"""),
"compare": (self.textCompare, """
# [KEYWORD] COMPARE
# Feature - Compare bible versions of a single or multiple references.
# Usage - COMPARE:::[BIBLE_VERSION(S)]:::[BIBLE_REFERENCE(S)]
# Remarks:
# 1) All installed bible versions are opened for comparison if "[BIBLE_VERSION(S)]:::" is omitted.
# 2) Multiple bible versions for comparison are separated by "_".
# 3) If a single reference is entered and bible versions for comparison are specified, verses of the same chapter of the entered reference are opened.
            # 4) Multiple verse references are supported for comparison.
# e.g. COMPARE:::John 3:16
# e.g. COMPARE:::KJV_NET_CUV:::John 3:16
# e.g. COMPARE:::KJV_NET_CUV:::John 3:16; Rm 5:8"""),
"sidebyside": (self.textCompareSideBySide, """
# [KEYWORD] SIDEBYSIDE
# Feature - Compare bible versions side by side
# Usage - SIDEBYSIDE:::[BIBLE_VERSION(S)]:::[BIBLE_REFERENCE]
# Remarks: Multiple bible versions for comparison are separated by "_"."""),
"difference": (self.textDiff, """
# [KEYWORD] DIFFERENCE
# Feature - same as [KEYWORD] DIFF
# Usage - DIFFERENCE:::[BIBLE_VERSION(S)]:::[BIBLE_REFERENCE(S)]
# Remarks:
# 1) Last-opened bible version is always displayed at the top for comparison.
# 2) All installed bible versions are opened for comparison if "[BIBLE_VERSION(S)]:::" is omitted.
# 3) Multiple bible versions for comparison are separated by "_".
            # 4) Multiple verse references are supported for comparison.
# e.g. DIFFERENCE:::Joh 3:16
# e.g. DIFFERENCE:::KJV_ASV_WEB:::Joh 3:16; Rm 5:8"""),
"parallel": (self.textParallel, """
# [KEYWORD] PARALLEL
# Feature - Display bible versions of the same chapter in parallel columns.
# Usage - PARALLEL:::[BIBLE_VERSION(S)]:::[BIBLE_REFERENCE]
# Remarks:
# 1) Multiple bible versions for comparison are separated by "_".
# 2) If a single reference is entered and bible versions for comparison are specified, verses of the same chapter of the entered reference are opened.
# 3) Muliple verse references are supported for comparison.
# 4) Only the bible version last opened on main view is opened if "[BIBLE_VERSION(S)]:::" is omitted.
# e.g. PARALLEL:::NIV_CCB_CEB:::John 3:16
# e.g. PARALLEL:::NIV_CCB_CEB:::John 3:16; Rm 5:8"""),
"passages": (self.textPassages, """
# [KEYWORD] PASSAGES
# Feature - Display different bible passages of the same bible version in parallel columns. It is created for studying similar passages.
# Usage - PASSAGES:::[BIBLE_VERSION]:::[BIBLE_REFERENCE]
# Remarks:
# 1) Only the bible version last opened on main view is opened if "[BIBLE_VERSION(S)]:::" is omitted.
# 2) Only the first bible version specified in the command is taken, even multiple bible versions are entered and separated by "_".
# 3) Users can read an additional version by setting config.addFavouriteToMultiRef as True.
# 4) Book abbreviations and ranges of verses are supported for bible references.
# 5) If a chapter reference is entered, only verse 1 of the chapter specified is displayed.
# e.g. PASSAGES:::Mat 3:13-17; Mark 1:9-11; Luk 3:21-23
# e.g. PASSAGES:::KJV:::Mat 3:13-17; Mark 1:9-11; Luk 3:21-23"""),
"outline": (self.textBookOutline, """
# [KEYWORD] OUTLINE
# Feature - Display all subheadings in a bible book
# Usage - OUTLINE:::[BIBLE_BOOK]
# e.g. outline:::John"""),
"overview": (self.textChapterOverview, """
# [KEYWORD] OVERVIEW
# Feature - Display overview of a bible chapter
# Usage - OVERVIEW:::[BIBLE_CHAPTER]
# e.g. overview:::John 3"""),
"summary": (self.textChapterSummary, """
# [KEYWORD] SUMMARY
# Feature - Display summary of a bible chapter
# Usage - SUMMARY:::[BIBLE_CHAPTER]
# e.g. summary:::John 3"""),
"concordance": (self.textConcordance, """
# [KEYWORD] CONCORDANCE
# Feature - Search a Strong's number bible
# Usage - CONCORDANCE:::[BIBLE_VERSION(S)]:::[STRONG_NUMBER]
# Assigning "ALL" as "BIBLE_VERSION(S)" to search all installed Strong's number bibles.
# e.g. CONCORDANCE:::KJVx:::G3087
# e.g. CONCORDANCE:::ESVx_KJVx_NIVx_WEBx:::G3087
# e.g. CONCORDANCE:::ALL:::G3087"""),
"count": (self.textCountSearch, """
# [KEYWORD] COUNT
# Feature - Count occurrences of a string in bible books.
# Usage - COUNT:::[BIBLE_VERSION(S)]:::[LOOK_UP_STRING]:::[BIBLE_BOOKS]
# To search for a string in a bible
# e.g. COUNT:::KJV:::love
# To search with a wild card character "%"
# e.g. COUNT:::KJV:::Christ%Jesus
# To search multiple bibles, separate versions with a character "_"
# e.g. COUNT:::KJV_WEB:::love
# e.g. COUNT:::KJV_WEB:::Christ%Jesus
# To search specific books of bible
# e.g. COUNT:::KJV:::love:::Matt-John, 1Cor, Rev
# e.g. COUNT:::KJV:::temple:::OT
"""),
# semantic search requires OpenAI API key
"semantic": (self.textSemanticSearch, """
# [KEYWORD] SEMANTIC
# Feature - Bible Query via OpenAI API and Llama Index.
# Usage - SEMANTIC:::[BIBLE_VERSION]:::[QUERY]
# e.g. SEMANTIC:::KJV:::quote verses on "God created the earth"
# e.g. SEMANTIC:::KJV:::write a summary on Exodus 14
# e.g. SEMANTIC:::KJV:::compare Mark 1 and John 1
"""),
# gpt index search requires OpenAI API key
"gptsearch": (self.textGPTSEARCHSearch, """
# [KEYWORD] GPTSEARCH
# Feature - Use natural language to search bible modules.
# Usage - GPTSEARCH:::[BIBLE_VERSION]:::[QUERY]
# e.g. GPTSEARCH:::NET:::slow to speak
# e.g. GPTSEARCH:::NET:::verses contain both Jesus and love
# e.g. GPTSEARCH:::NET:::verses contain spirit but not holy
# e.g. GPTSEARCH:::NET:::faith in chapter 3
# e.g. GPTSEARCH:::verses that contain both 'God' and 'faith' in the book of Isaiah
"""),
"search": (self.textSearchBasic, """
# [KEYWORD] SEARCH
# Feature - Search bible / bibles for a string
# Usage - SEARCH:::[BIBLE_VERSION(S)]:::[LOOK_UP_STRING]:::[BIBLE_BOOKS]
            # SEARCH::: is different from COUNT::: in that COUNT::: shows the number of hits in individual books only, whereas SEARCH::: displays all texts of the result.
# e.g. SEARCH:::KJV:::love
# To work on multiple bibles, separate bible versions with a character "_":
# e.g. SEARCH:::KJV_WEB:::love
# To search specific books of bible
# e.g. SEARCH:::KJV:::love:::Matt-John, 1Cor, Rev
# e.g. SEARCH:::KJV:::temple:::OT
"""),
"searchreference": (self.textSearchReference, """
# [KEYWORD] SEARCHREFERENCE"""),
"searchtnk": (self.textSearchOT, """
# [KEYWORD] SEARCHTNK
# Feature - Search Tanakh ONLY
# Usage - SEARCHTNK:::[BIBLE_VERSION(S)]:::[LOOK_UP_STRING]
# e.g. SEARCHTNK:::KJV_WEB:::love
# e.g. SEARCHTNK:::KJV_WEB:::God%kind"""),
"searchot": (self.textSearchOT, """
# [KEYWORD] SEARCHOT
# Feature - Search O.T. ONLY
# Usage - SEARCHOT:::[BIBLE_VERSION(S)]:::[LOOK_UP_STRING]
# e.g. SEARCHOT:::KJV_WEB:::love
# e.g. SEARCHOT:::KJV_WEB:::God%kind"""),
"searchnt": (self.textSearchNT, """
# [KEYWORD] SEARCHNT
# Feature - Search N.T. ONLY
# Usage - SEARCHNT:::[BIBLE_VERSION(S)]:::[LOOK_UP_STRING]
# e.g. SEARCHNT:::KJV_WEB:::love
# e.g. SEARCHNT:::KJV_WEB:::Christ%Jesus"""),
"regexsearch": (self.textSearchRegex, """
# [KEYWORD] REGEXSEARCH
# Feature - Search bible / bibles with regular expression
# Usage - REGEXSEARCH:::[BIBLE_VERSION(S)]:::[REGEX_PATTERN]:::[BIBLE_BOOKS]
# e.g. REGEXSEARCH:::KJV:::God.*?heaven
# To search specific books of bible
# e.g. REGEXSEARCH:::KJV:::God.*?love:::Matt-John, 1Cor, Rev
# e.g. REGEXSEARCH:::KJV:::God.*?temple:::OT
"""),
"advancedsearch": (self.textSearchAdvanced, """
# [KEYWORD] ADVANCEDSEARCH
# Feature - Search bible / bibles with a sql string
# Usage - ADVANCEDSEARCH:::[BIBLE_VERSION(S)]:::[LOOK_UP_STRING]:::[BIBLE_BOOKS]
# e.g. ADVANCEDSEARCH:::KJV:::Book = 1 AND Scripture LIKE '%love%'
# To work on multiple bibles, separate bible versions with a character "_":
# e.g. ADVANCEDSEARCH:::KJV_WEB:::Book = 1 AND Scripture LIKE '%love%'"""),
"andsearch": (self.textAndSearch, """
# [KEYWORD] ANDSEARCH
# Feature - Search bible / bibles for combinations of words without taking order into consideration
# Usage - ANDSEARCH:::[BIBLE_VERSION(S)]:::[LOOK_UP_STRING]:::[BIBLE_BOOKS]
# Words are separated by a character "|" in a search string.
# e.g. ANDSEARCH:::KJV:::love|Jesus
# alias of, e.g. ADVANCEDSEARCH:::KJV:::Scripture LIKE "%love%" AND Scripture LIKE "%Jesus%" """),
"orsearch": (self.textOrSearch, """
# [KEYWORD] ORSEARCH
            # Feature - Search bible / bibles for verses containing at least one of the words given in a string
# Usage - ORSEARCH:::[BIBLE_VERSION(S)]:::[LOOK_UP_STRING]:::[BIBLE_BOOKS]
# Words are separated by a character "|" in a search string.
# e.g. ORSEARCH:::KJV:::love|Jesus
# alias of, e.g. ADVANCEDSEARCH:::KJV:::Scripture LIKE "%love%" OR Scripture LIKE "%Jesus%" """),
"searchhighlight": (self.highlightSearch, """
# [KEYWORD] SEARCHHIGHLIGHT
# Feature - Search for highlight
# Usage - SEARCHHIGHLIGHT:::[COLOR]:::[BIBLE_REFERENCE]
# To search entire Bible for all highlight
# e.g. SEARCHHIGHLIGHT:::all
# To search entire Bible for yellow highlight
# e.g. SEARCHHIGHLIGHT:::yellow:::all
# e.g. SEARCHHIGHLIGHT:::yellow
# To search New Testament for blue highlight
# e.g. SEARCHHIGHLIGHT:::blue:::nt
# To search Old Testament for blue highlight
# e.g. SEARCHHIGHLIGHT:::blue:::ot
# To search Matthew for blue highlight
# e.g. SEARCHHIGHLIGHT:::hl2:::Matthew
# To search James for underline highlight
# e.g. SEARCHHIGHLIGHT:::underline:::James
# e.g. SEARCHHIGHLIGHT:::ul1:::James"""),
"index": (self.textIndex, """
# [KEYWORD] INDEX
# e.g. INDEX:::Gen 1:1"""),
"chapterindex": (self.textChapterIndex, """
# [KEYWORD] CHAPTERINDEX
# e.g. CHAPTERINDEX:::Gen 1"""),
"data": (self.textData, """
# [KEYWORD] DATA
# Feature - Display a data list into a table
# Usage - DATA:::[menu_plugin_bible_data_filename]
# e.g. DATA:::Bible Chronology"""),
"day": (self.textDay, """
# [KEYWORD] DAY
# Feature - Display 365 Day Bible Reading Content
# Usage - DAY:::[BIBLE_VERSION]:::[day_number]
# e.g. DAY:::1
# e.g. DAY:::NET:::1"""),
"dayaudio": (self.textDayAudio, """
# [KEYWORD] DAYAUDIO
# Feature - Open 365 Day Bible Reading Audio
# Usage - DAYAUDIO:::[BIBLE_VERSION]:::[day_number]
# e.g. DAYAUDIO:::1
# e.g. DAYAUDIO:::NET:::1"""),
"dayaudioplus": (self.textDayAudioPlus, """
# [KEYWORD] DAYAUDIOPLUS
# Feature - Open 365 Day Bible Reading Audio in two translations
# Usage - DAYAUDIOPLUS:::[BIBLE_VERSION(S)]:::[day_number]
# e.g. DAYAUDIOPLUS:::1
# e.g. DAYAUDIOPLUS:::NET:::1"""),
"map": (self.textMap, """
# [KEYWORD] MAP
# Feature - Open a Google map with bible locations pinned
# Usage - MAP:::[BIBLE_REFERENCE]
# e.g. MAP:::Act 15:36-18:22"""),
"locations": (self.textLocations, """
# [KEYWORD] LOCATIONS
# Feature - Customise a Google map with bible locations pinned; locations separated by |
# Usage - LOCATIONS:::[BIBLE_LOCATIONS]
# e.g. LOCATIONS:::BL634|BL636"""),
"crossreference": (self.textCrossReference, """
# [KEYWORD] CROSSREFERENCE
# e.g. CROSSREFERENCE:::Gen 1:1
# e.g. CROSSREFERENCE:::[Cross reference file]:::Rev 1:1
"""),
"tske": (self.tske, """
# [KEYWORD] TSKE
# e.g. TSKE:::Gen 1:1"""),
"commentary": (self.textCommentary, """
# [KEYWORD] COMMENTARY
# Feature - Open commentary of a bible reference.
# Usage - COMMENTARY:::[COMMENTARY_MODULE]:::[BIBLE_REFERENCE]
# Remarks:
# 1) The last opened commentary module is opened if "[COMMENTARY_MODULE]:::" is omitted.
# 2) Commentary is opened on study view.
# e.g. COMMENTARY:::John 3:16
# e.g. COMMENTARY:::CBSC:::John 3:16"""),
"commentary2": (self.textCommentary2, """
# [KEYWORD] COMMENTARY2
# Feature - Open commentary of a bible reference.
# Usage - COMMENTARY2:::[BOOK_NO].[CHAPTER_NO].[VERSE_NO]
# Usage - COMMENTARY2:::[COMMENTARY_MODULE]:::[BOOK_NO].[CHAPTER_NO].[VERSE_NO]
# Remarks:
# 1) The last opened commentary module is opened if "[COMMENTARY_MODULE]:::" is omitted.
# 2) Commentary is opened on study view.
# 3) Bible reference used with "COMMENTARY2:::" is formatted as [BOOK_NUMBER.CHAPTER_NUMBER.VERSE_NUMBER], see examples below.
# e.g. COMMENTARY2:::43.3.16
# e.g. COMMENTARY2:::CBSC:::43.3.16"""),
"distinctinterlinear": (self.distinctInterlinear, """
# [KEYWORD] DISTINCTINTERLINEAR
# e.g. DISTINCTINTERLINEAR:::G746"""),
"distincttranslation": (self.distinctTranslation, """
# [KEYWORD] DISTINCTTRANSLATION
# e.g. DISTINCTTRANSLATION:::G746"""),
"combo": (self.textCombo, """
# [KEYWORD] COMBO
# e.g. COMBO:::Gen 1:1"""),
"translation": (self.textTranslation, """
# [KEYWORD] TRANSLATION
# e.g. TRANSLATION:::Gen 1:1"""),
"discourse": (self.textDiscourse, """
# [KEYWORD] DISCOURSE
# e.g. DISCOURSE:::Gen 1:1"""),
"words": (self.textWords, """
# [KEYWORD] WORDS
# e.g. WORDS:::Gen 1:1"""),
"lexicon": (self.textLexicon, """
# [KEYWORD] LEXICON
# Usage - LEXICON:::[LEXICON_MODULE]:::[LEXICAL_ENTRY]
# Usage - LEXICON:::[LEXICON_MODULE]:::[LEXICAL_ENTRIES]
# e.g. LEXICON:::BDB:::H7225
# e.g. LEXICON:::BDB:::H7225_H123"""),
"searchlexicon": (self.searchLexicon, """
# [KEYWORD] SEARCHLEXICON
# Usage - SEARCHLEXICON:::[LEXICON_MODULE]:::[TOPIC ENTRY SEARCH]
# e.g. SEARCHLEXICON:::BDB:::H7225
# e.g. SEARCHLEXICON:::Dake-topics:::Jesus
# e.g. SEARCHLEXICON:::ALL:::Peace
"""),
"reverselexicon": (self.textReverseLexicon, """
# [KEYWORD] REVERSELEXICON
# Usage - REVERSELEXICON:::[LEXICON_MODULE]:::[DEFINITION]
# Usage - REVERSELEXICON:::[LEXICON_MODULE]:::[DEFINITION_ENTRIES]
# e.g. REVERSELEXICON:::TRLIT:::Jesus"""),
"lmcombo": (self.textLMcombo, """
# [KEYWORD] LMCOMBO
# e.g. LMCOMBO:::E70002:::ETCBC:::subs.f.sg.a"""),
"lemma": (self.textLemma, """
# [KEYWORD] LEMMA
# e.g. LEMMA:::E70002
# e.g. LEMMA:::H7225"""),
"morphologycode": (self.textMorphologyCode, """
# [KEYWORD] MORPHOLOGYCODE
# e.g. MORPHOLOGYCODE:::E70002,subs.f.sg.a"""),
"morphology": (self.textMorphology, """
# [KEYWORD] MORPHOLOGY
# e.g. MORPHOLOGY:::LexicalEntry LIKE '%E70002,%' AND Morphology LIKE '%feminine%'"""),
"searchmorphology": (self.textSearchMorphology, """
# [KEYWORD] SEARCHMORPHOLOGY
# e.g. SEARCHMORPHOLOGY:::E70002:::feminine
# alias of e.g. MORPHOLOGY:::LexicalEntry LIKE '%E70002,%' AND (Morphology LIKE "%feminine%")
# e.g. SEARCHMORPHOLOGY:::E70002:::feminine|noun
# alias of e.g. MORPHOLOGY:::LexicalEntry LIKE '%E70002,%' AND (Morphology LIKE "%feminine%" OR Morphology LIKE "%noun%")"""),
"searchmorphologybylex": (self.searchMorphologyByLex, """
# [KEYWORD] SEARCHMORPHOLOGYBYLEX
# e.g. SEARCHMORPHOLOGYBYLEX:::G2424:::Noun,Nominative,Masculine
# e.g. SEARCHMORPHOLOGYBYLEX:::G2424:::Noun,Nominative,Masculine:::40-66
"""),
"searchmorphologybyword": (self.searchMorphologyByWord, """
# [KEYWORD] SEARCHMORPHOLOGYBYWORD
# e.g. SEARCHMORPHOLOGYBYWORD:::Ἰησοῦς:::Noun,Dative,Masculine
# e.g. SEARCHMORPHOLOGYBYWORD:::Ἰησοῦς:::Noun,Dative,Masculine:::40
"""),
"searchmorphologybygloss": (self.searchMorphologyByGloss, """
# [KEYWORD] SEARCHMORPHOLOGYBYGLOSS
# e.g. SEARCHMORPHOLOGYBYGLOSS:::Joshua:::Noun,Dative,Masculine
# e.g. SEARCHMORPHOLOGYBYGLOSS:::Joshua:::Noun,Dative,Masculine:::1-66
"""),
"word": (self.textWordData, """
# [KEYWORD] WORD
# Usage - WORD:::[BOOK_NO]:::[WORD_NO]
# e.g. WORD:::1:::2"""),
"clause": (self.textClause, """
# [KEYWORD] CLAUSE
# e.g. Display the first clause of Gen 1:1 in MAB
# e.g. CLAUSE:::1.1.1:::1"""),
"searchtool": (self.textSearchTool, """
# [KEYWORD] SEARCHTOOL
# e.g. SEARCHTOOL:::EXLBP:::Jesus
# e.g. SEARCHTOOL:::HBN:::Jesus
# e.g. SEARCHTOOL:::EXLBL:::Jerusalem
# e.g. SEARCHTOOL:::EXLBT:::faith
# e.g. SEARCHTOOL:::EAS:::faith
# e.g. SEARCHTOOL:::ISB:::faith
# e.g. SEARCHTOOL:::mETCBC:::prep"""),
"exlb": (self.textExlb, """
# [KEYWORD] EXLB
# e.g. EXLB:::exlbp:::BP904
# e.g. EXLB:::exlbl:::BL636"""),
"dictionary": (self.textDictionary, """
# [KEYWORD] DICTIONARY
# e.g. DICTIONARY:::EAS1312"""),
"encyclopedia": (self.textEncyclopedia, """
# [KEYWORD] ENCYCLOPEDIA
# e.g. ENCYCLOPEDIA:::ISB:::ISBE3333"""),
"searchthirddictionary": (self.thirdDictionarySearch, """
# [KEYWORD] SEARCHTHIRDDICTIONARY
# e.g. SEARCHTHIRDDICTIONARY:::faith
# e.g. SEARCHTHIRDDICTIONARY:::webster:::faith"""),
"thirddictionary": (self.thirdDictionaryOpen, """
# [KEYWORD] THIRDDICTIONARY
# e.g. THIRDDICTIONARY:::webster:::FAITH"""),
"book": (self.textBook, """
# [KEYWORD] BOOK
# Usage - BOOK:::[BOOK_MODULE]:::[OPTIONAL_TOPIC]
# To view all the available topics of a book
# e.g. BOOK:::Timelines
# To specify a particular topic
# e.g. BOOK:::Timelines:::2210-2090_BCE"""),
"searchbook": (self.textSearchBook, """
# [KEYWORD] SEARCHBOOK
# To search the last opened book module
# e.g. SEARCHBOOK:::Abraham
# To search a particular book module
# e.g. SEARCHBOOK:::OT_History1:::Abraham
# To search multiple books, separate book modules with a comma ",".
# e.g. SEARCHBOOK:::OT_History1,OT_History2:::Abraham
# To search all favourite book modules
# e.g. SEARCHBOOK:::FAV:::Jerusalem
# To search all installed book modules
# e.g. SEARCHBOOK:::ALL:::Jerusalem
# Remarks: Module creator should avoid comma for naming a book module."""),
"searchbookchapter": (self.textSearchBookChapter, """
# [KEYWORD] SEARCHBOOKCHAPTER
# Similar to searchbook:::, the difference is that "searchbookchapter:::" searches chapters only
# e.g. SEARCHBOOKCHAPTER:::Bible_Promises:::index"""),
"searchallbookspdf": (self.textSearchAllBooksAndPdf, """
# [KEYWORD] SEARCHALLBOOKSPDF
# Search all books and all PDF files"""),
"cmd": (self.osCommand, """
# [KEYWORD] cmd
# Feature - Run an os command
# Warning! Make sure you know what you are running before you use this keyword. The running command may affect data outside the UniqueBible folder.
# Remarks: This command works ONLY when config.enableCmd is set to True.
# Examples on Windows:
# e.g. cmd:::notepad
# e.g. cmd:::start latest_changes.txt
# Examples on macOS:
# e.g. cmd:::open latest_changes.txt
# e.g. cmd:::open "~/Applications/Visual Studio Code.app"/
# Examples on Linux:
# e.g. cmd:::firefox
# e.g. cmd:::mkdir -p myNotes; cd myNotes; gedit test.txt
# e.g. cmd:::rm -rf myNotes
# e.g. cmd:::google-chrome https://uniquebible.app"""),
"speak": (self.textToSpeech, """
# [KEYWORD] SPEAK
# Feature: run text-to-speech function
# e.g. SPEAK:::All Scripture is inspired by God
# e.g. SPEAK:::en-gb:::All Scripture is inspired by God
# e.g. SPEAK:::zh-cn:::聖經都是上帝所默示的
# e.g. SPEAK:::zh-tw:::聖經都是上帝所默示的"""),
"gtts": (self.googleTextToSpeech, """
# [KEYWORD] GTTS
# Feature: run text-to-speech function
# e.g. GTTS:::All Scripture is inspired by God
# e.g. GTTS:::en:::All Scripture is inspired by God
# e.g. GTTS:::zh:::聖經都是上帝所默示的"""),
"mp3": (self.mp3Download, """
# [KEYWORD] MP3
# Feature: run yt-dlp to download mp3 from YouTube, provided that yt-dlp is installed on the user's system
# Usage - MP3:::[youtube_link]"""),
"mp4": (self.mp4Download, """
# [KEYWORD] MP4
# Usage - MP4:::[youtube_link]"""),
"media": (self.openMediaPlayer, """
# [KEYWORD] MEDIA
# Feature: run media player to play mp3 and mp4 files
# e.g. MEDIA:::music/AmazingGrace.mp3
# e.g. MEDIA:::video/ProdigalSon.mp4
"""),
"read": (self.textRead, """
# [KEYWORD] READ
# Feature - Read a single bible passage or multiple bible passages.
# Usage - READ:::[BIBLE_VERSION(S)]:::[BIBLE_REFERENCE(S)]
# Remarks:
# 1) The bible version last opened on main view is opened by default if "[BIBLE_VERSION]:::" is omitted.
# e.g. READ:::Jn 3:16-18
# e.g. READ:::KJV:::Jn 3:16-18; Deut 6:4
# e.g. READ:::KJV_CUV:::Jn 3:16-18; Deut 6:4
"""),
"readsync": (self.textReadSync, """
# [KEYWORD] READSYNC
# Feature - Read a single bible passage or multiple bible passages, with text display synchronisation.
# Usage - READSYNC:::[BIBLE_VERSION(S)]:::[BIBLE_REFERENCE(S)]
# Remarks:
# 1) The bible version last opened on main view is opened by default if "[BIBLE_VERSION]:::" is omitted.
# e.g. READSYNC:::Jn 3:16-18
# e.g. READSYNC:::KJV:::Jn 3:16-18; Deut 6:4
# e.g. READSYNC:::KJV_CUV:::Jn 3:16-18; Deut 6:4
"""), #textReadSync
"readchapter": (self.readChapter, """
# [KEYWORD] READCHAPTER
# Feature: read a bible chapter verse by verse
# e.g. READCHAPTER:::CUV.1.1
"""),
"readverse": (self.readVerse, """
# [KEYWORD] READVERSE
# Feature: read a bible verse
# e.g. READVERSE:::CUV.1.1.1
"""),
"readword": (self.readWord, """
# [KEYWORD] READWORD
# Feature: read a word
# Usage - READWORD:::[BIBLE_VERSION].[BOOK_NO].[CHAPTER_NO].[VERSE_NO].[WORD_NO]
# e.g. READWORD:::BHS5.1.1.1.1
"""),
"readlexeme": (self.readLexeme, """
# [KEYWORD] READLEXEME
# Feature: read a lexeme
# Usage - READLEXEME:::[BIBLE_VERSION].[BOOK_NO].[CHAPTER_NO].[VERSE_NO].[WORD_NO]
# e.g. READLEXEME:::BHS5.1.1.1.1
"""),
"readbible": (self.readBible, """
# [KEYWORD] READBIBLE
# Feature: Play Bible mp3 file recording of a chapter
# mp3 files should be placed under audio/bibles/[Bible Text]/default/[Chapter number]/
# for example, audio/bibles/KJV/default/40/
# each file should be a recording of a chapter with the filename "[Book number]_[Name][Chapter number].mp3"
# for example, 40_Matthew001.mp3. Chapter numbers should be three digits (e.g. `001`).
# mp3 files can be downloaded from https://www.audiotreasure.com/audioindex.htm
# Usage:
# e.g. READBIBLE::: # Reads current Bible and current chapter
# e.g. READBIBLE:::@soft-music # Reads from soft-music folder instead of default folder
# e.g. READBIBLE:::Matt. 1 # Reads chapter from current Bible
# e.g. READBIBLE:::Matt. 1,John. 1 # Reads chapters from current Bible
# e.g. READBIBLE:::Matt. 1-28 # Reads chapters from current Bible
# e.g. READBIBLE:::KJV:::Matt. 1 # Reads chapter from Bible
# e.g. READBIBLE:::KJV:::Matt. 1,Matt. 2 # Reads chapters from Bible
# e.g. READBIBLE:::KJV:::Matt. 1-4:::soft-music # Reads from soft-music folder instead of default folder
"""),
"opennote": (self.textOpenNoteFile, """
# [KEYWORD] opennote
# e.g. opennote:::file_path"""),
"open": (self.openExternalFile, """
# [KEYWORD] open
# open::: is different from opennote::: in that open::: uses the system default application to open the file.
# e.g. open:::."""),
"pdf": (self.openPdfReader, """
# [KEYWORD] PDF
# Feature: Open PDF file, located in directory marvelData/pdf/
# Usage - PDF:::[PDF_filename]
# Usage - PDF:::[PDF_filename]:::[page_number]
# e.g. PDF:::Newton - Olney Hymns.pdf:::110"""),
"pdffind": (self.openPdfReaderFind, """
# [KEYWORD] PDFFIND
# Feature: Open PDF file, located in directory marvelData/pdf/ and search for text
# Usage - PDFFIND:::[PDF_filename]:::[TEXT]
# e.g. PDFFIND:::Newton - Olney Hymns.pdf:::Amazing"""),
"searchpdf": (self.searchPdf, """
# [KEYWORD] SEARCHPDF
# Feature: Search for text inside PDFs in marvelData/pdf/
# Usage - SEARCHPDF:::[TEXT]
# e.g. SEARCHPDF:::Jesus"""),
"anypdf": (self.openPdfReaderFullpath, """
# [KEYWORD] ANYPDF
# Feature: Open a PDF file located on the local device, where users have read permission.
# Remarks: It works basically the same as the keyword PDF:::, except that ANYPDF::: accepts a full pdf file path only.
# Usage - ANYPDF:::[PDF_filename_fullpath]
# Usage - ANYPDF:::[PDF_filename_fullpath]:::[page_number]
# e.g. ANYPDF:::file.pdf:::110"""),
"epub": (self.openEpubReader, """
# [KEYWORD] EPUB
# Feature: Open EPUB file, located in directory marvelData/epub/
# Usage - EPUB:::[EPUB_filename]"""),
"docx": (self.openDocxReader, """
# [KEYWORD] DOCX
# Feature: Open Word Document
# Usage - DOCX:::[DOCX_filename]
# e.g. DOCX:::test.docx"""),
"translate": (self.translateText, """
# [KEYWORD] TRANSLATE
# Feature - Use IBM Watson service to translate the entered text
# It works only if the python package 'ibm-watson' is installed
# Usage - TRANSLATE:::[text to be translated]
# Usage - TRANSLATE:::[source_language_code]-[target_language_code]:::[text to be translated]
# Language code of config.userLanguage is used by default if language code is not provided. If config.userLanguage is not defined, "en" is used.
# e.g. TRANSLATE:::測試
# e.g. TRANSLATE:::en-zh:::test"""),
"openbooknote": (self.openBookNoteRef, """
# [KEYWORD] openbooknote
# e.g. openbooknote:::John"""),
"openchapternote": (self.openChapterNoteRef, """
# [KEYWORD] openchapternote
# e.g. openchapternote:::John 3"""),
"openversenote": (self.openVerseNoteRef, """
# [KEYWORD] openversenote
# e.g. openversenote:::John 3:16"""),
"editbooknote": (self.editBookNoteRef, """
# [KEYWORD] editbooknote
# e.g. editbooknote:::John"""),
"editchapternote": (self.editChapterNoteRef, """
# [KEYWORD] editchapternote
# e.g. editchapternote:::John 3"""),
"editversenote": (self.editVerseNoteRef, """
# [KEYWORD] editversenote
# e.g. editversenote:::John 3:16"""),
"searchbooknote": (self.textSearchBookNote, """
# [KEYWORD] SEARCHBOOKNOTE
# e.g. SEARCHBOOKNOTE:::faith"""),
"searchchapternote": (self.textSearchChapterNote, """
# [KEYWORD] SEARCHCHAPTERNOTE
# e.g. SEARCHCHAPTERNOTE:::faith"""),
"searchversenote": (self.textSearchVerseNote, """
# [KEYWORD] SEARCHVERSENOTE
# e.g. SEARCHVERSENOTE:::faith"""),
"openjournal": (self.openJournalNote, """
# [KEYWORD] OPENJOURNAL
# Feature - Open personal journal
# Usage - OPENJOURNAL:::
# Usage - OPENJOURNAL:::[year]-[month]-[day]
# Remarks: Journal of the day is opened by default when a day is not specified.
# e.g. OPENJOURNAL:::
# e.g. OPENJOURNAL:::2022-12-25"""),
"editjournal": (self.editJournalNote, """
# [KEYWORD] EDITJOURNAL
# Feature - Open personal journal in text editor
# Usage - EDITJOURNAL:::
# Usage - EDITJOURNAL:::[year]-[month]-[day]
# Remarks: Journal of the day is opened in text editor by default when a day is not specified.
# e.g. EDITJOURNAL:::
# e.g. EDITJOURNAL:::2022-12-25"""),
"searchjournal": (self.searchJournalNote, """
# [KEYWORD] SEARCHJOURNAL
# Feature - Search personal journal
# Usage - SEARCHJOURNAL:::[LOOK_UP_STRING]
# e.g. SEARCHJOURNAL:::faith"""),
"download": (self.download, """
# [KEYWORD] DOWNLOAD
# Feature - Download marvel data, github files
# Usage - DOWNLOAD:::[source]:::[file]
# Available sources: ["MarvelData", "MarvelBible", "MarvelCommentary", "GitHubBible", "GitHubCommentary", "GitHubBook", "GitHubMap", "GitHubPdf", "GitHubEpub"]
# e.g. DOWNLOAD:::marvelbible:::KJV
"""),
"import": (self.importResources, """
# [KEYWORD] IMPORT
# Feature - Import third party resources
# Usage - IMPORT:::
# Usage - IMPORT:::[directory_path_containing_supported_3rd_party_files]
# Remarks: If a directory is not specified, "import" is used by default.
# e.g. IMPORT:::import
"""),
"devotional": (self.openDevotional, """
# [KEYWORD] DEVOTIONAL
# Feature - Open today's devotional entry
# e.g. DEVOTIONAL:::Meyer
"""),
"displaywordfrequency": (self.displayWordFrequency, """
# [KEYWORD] DISPLAYWORDFREQUENCY
# Feature - Displays the word frequency for Bibles with Strongs numbers
# and highlights with different colors based on frequency
# Usage - DISPLAYWORDFREQUENCY:::[BIBLE_VERSION]:::[BIBLE_REFERENCE(S)]
# This will only highlight Bibles that contain Strongs numbers
"""),
#
# Keywords starting with "_" are mainly internal commands for GUI operations
# They are not recorded in history records.
#
"_imv": (self.instantMainVerse, """
# [KEYWORD] _imv
# Feature - Display a single verse text. It takes book, chapter and verse numbers.
# Usage - _imv:::[BOOK_NO].[CHAPTER_NO].[VERSE_NO]
# e.g. _imv:::1.1.1
# e.g. _imv:::43.3.16"""),
"_imvr": (self.instantMainVerseReference, """
# [KEYWORD] _imvr
# Feature - Display a single verse text. It takes a bible reference.
# Usage - _imvr:::[BIBLE_REFERENCE]
# e.g. _imvr:::Gen 1:1
# e.g. _imvr:::John 3:16"""),
"_instantverse": (self.instantVerse, """
# [KEYWORD] _instantverse
# Feature - Display interlinear verse text on bottom window.
# OHGB_WORD_ID is optional. Corresponding Hebrew / Greek word is highlighted if OHGB_WORD_ID is given.
# Usage: _instantverse:::[BOOK_NO].[CHAPTER_NO].[VERSE_NO]
# Usage: _instantverse:::[BOOK_NO].[CHAPTER_NO].[VERSE_NO].[OHGB_WORD_ID]
# e.g. _instantVerse:::1.1.1
# e.g. _instantVerse:::1.1.1.1"""),
"_instantword": (self.instantWord, """
# [KEYWORD] _instantword
# e.g. _instantWord:::1:::h2"""),
"_lexicaldata": (self.instantLexicalData, """
# [KEYWORD] _lexicaldata
# Feature - Display lexical data on bottom window.
# Usage: _lexicaldata:::[LEXICAL_ENTRY]
# e.g. _lexicaldata:::G1234"""),
"_vnsc": (self.verseNoSingleClick, """
# [KEYWORD] _vnsc
# Feature - Verse number single-click action
# e.g. _vnsc:::KJV.43.3.16.John 3:16"""),
"_vndc": (self.verseNoDoubleClick, """
# [KEYWORD] _vndc
# Feature - Verse number double-click action
# e.g. _vndc:::KJV.43.3.16"""),
"_menu": (self.textMenu, """
# [KEYWORD] _menu
# Feature - Open UBA classic html menu
# e.g. _menu:::
# e.g. _menu:::43
# e.g. _menu:::43.3
# e.g. _menu:::43.3.16"""),
"_comparison": (self.textComparisonMenu, """
# [KEYWORD] _comparison
# Feature - Open html menu for bible version comparison
# e.g. _comparison:::"""),
"_chapters": (self.textChapters, """
# [KEYWORD] _chapters
# Feature - Display all available chapters of a bible version.
# Usage - _chapters:::[BIBLE_VERSION]
# e.g. _chapters:::KJV
# e.g. _chapters:::NET"""),
"_verses": (self.textVerses, """
# [KEYWORD] _verses
# Feature - Display all available verses of a bible chapter.
# Usage - _verses:::[BIBLE_VERSION]:::[BIBLE_REFERENCE]
# e.g. _verses:::Jn 3
# e.g. _verses:::KJV:::Jn 3
# e.g. _verses:::NET:::Jn 3"""),
"_commentaries": (self.textCommentaries, """
# [KEYWORD] _commentaries
# Feature - Display all available commentary modules.
# Usage - _commentaries:::
# e.g. _commentaries:::"""),
"_commentarychapters": (self.textCommentaryChapters, """
# [KEYWORD] _commentarychapters
# Feature - Display commentary chapter menu.
# Usage - _commentarychapters:::[COMMENTARY]
# e.g. _commentarychapters:::BI
# e.g. _commentarychapters:::CBSC"""),
"_commentaryverses": (self.textCommentaryVerses, """
# [KEYWORD] _commentaryverses
# Feature - Display commentary verse menu.
# Usage - _commentaryverses:::[COMMENTARY]:::[BIBLE_REFERENCE]
# e.g. _commentaryverses:::Jn 3
# e.g. _commentaryverses:::BI:::Jn 3
# e.g. _commentaryverses:::CBSC:::Jn 3"""),
"_commentary": (self.textCommentaryMenu, """
# [KEYWORD] _commentary
# e.g. _commentary:::CBSC.1.1.1"""),
"_book": (self.textBookMenu, """
# [KEYWORD] _book
# e.g. _book:::"""),
"_info": (self.textInfo, """
# [KEYWORD] _info
# e.g. _info:::Genesis"""),
"_bibleinfo": (self.textBibleInfo, """
# [KEYWORD] _bibleinfo
# e.g. _bibleinfo:::KJV"""),
"_commentaryinfo": (self.textCommentaryInfo, """
# [KEYWORD] _commentaryinfo
# e.g. _commentaryinfo:::CBSC"""),
"_command": (self.textCommand, """
# [KEYWORD] _command
# e.g. _command:::testing"""),
"_history": (self.textHistory, """
# [KEYWORD] _history
# e.g. _history:::main
# e.g. _history:::study"""),
"_historyrecord": (self.textHistoryRecord, """
# [KEYWORD] _historyrecord
# e.g. _historyrecord:::1"""),
"_image": (self.textImage, """
# [KEYWORD] _image
# e.g. _image:::EXLBL:::1.jpg"""),
"_htmlimage": (self.textHtmlImage, """
# [KEYWORD] _htmlimage
# Feature - open image file located in 'htmlResources/images/'
# Usage - _htmlimage:::[filepath_relative_to_images_directory]
# e.g. _htmlimage:::exlbl_largeHD/BL1263.png"""),
"_openbooknote": (self.openBookNote, """
# [KEYWORD] _openbooknote
# Feature - open bible book note
# Usage - _openbooknote:::[BOOK_NO]
# e.g. _openbooknote:::43"""),
"_openchapternote": (self.openChapterNote, """
# [KEYWORD] _openchapternote
# Feature - open bible chapter note
# Usage - _openchapternote:::[BOOK_NO].[CHAPTER_NO]
# e.g. _openchapternote:::43.3"""),
"_openversenote": (self.openVerseNote, """
# [KEYWORD] _openversenote
# Feature - open bible verse note
# Usage - _openversenote:::[BOOK_NO].[CHAPTER_NO].[VERSE_NO]
# e.g. _openversenote:::43.3.16"""),
"_editbooknote": (self.editBookNote, """
# [KEYWORD] _editbooknote
# Feature - edit bible book note
# Usage - _editbooknote:::[BOOK_NO]
# e.g. _editbooknote:::43"""),
"_editchapternote": (self.editChapterNote, """
# [KEYWORD] _editchapternote
# Feature - edit bible chapter note
# Usage - _editchapternote:::[BOOK_NO].[CHAPTER_NO]
# e.g. _editchapternote:::43.3"""),
"_editversenote": (self.editVerseNote, """
# [KEYWORD] _editversenote
# Feature - edit bible verse note
# Usage - _editversenote:::[BOOK_NO].[CHAPTER_NO].[VERSE_NO]
# e.g. _editversenote:::43.3.16"""),
"_open": (self.openMarvelDataFile, """
# [KEYWORD] _open
# open a file inside marvelData folder
# e.g. _open:::.
# e.g. _open:::bibles"""),
"_openfile": (self.textOpenFile, """
# [KEYWORD] _openfile
# Usage: _openfile:::[external_note_history_record_index]
# e.g. _openfile:::-1
# Remarks: -1 is the latest record"""),
"_editfile": (self.textEditFile, """
# [KEYWORD] _editfile
# Usage: _editfile:::[external_note_history_record_index]
# e.g. _editfile:::-1
# Remarks: -1 is the latest record"""),
"_website": (self.textWebsite, """
# [KEYWORD] _website
# e.g. _website:::https://marvel.bible"""),
"_uba": (self.textUba, """
# [KEYWORD] _uba
# e.g. _uba:::file://notes.uba
# e.g. _uba:::file://note_editor_key_combo.uba"""),
"_biblenote": (self.textBiblenote, """
# [KEYWORD] _biblenote
# Feature - retrieve bible module note(s) of a single verse.
# Usage - _biblenote:::[BIBLE_VERSION]:::[BOOK_NO].[CHAPTER_NO].[VERSE_NO]
# Usage - _biblenote:::[BIBLE_VERSION]:::[BOOK_NO].[CHAPTER_NO].[VERSE_NO].[NOTE_INDICATOR]
# e.g. _biblenote:::KJV:::1.1.1
# e.g. _biblenote:::KJV:::1.1.1.1"""),
"_wordnote": (self.textWordNote, """
# [KEYWORD] _wordnote
# e.g. _wordnote:::LXX1:::l1"""),
"_searchword": (self.textSearchWord, """
# [KEYWORD] _searchword
# Usage: _searchword:::[1=OT, 2=NT]:::[wordID]
# e.g. _searchword:::1:::1"""),
"_harmony": (self.textHarmony, """
# [KEYWORD] _harmony
# Feature - Display verses from a harmony collection.
# Usage - _harmony:::[collection_number].[entry_number]
# e.g. _harmony:::4.1"""),
"_promise": (self.textPromise, """
# [KEYWORD] _promise
# Feature - Display verses from a bible collection.
# Usage - _promise:::[collection_number].[entry_number]
# e.g. _promise:::4.1"""),
"_paste": (self.pasteFromClipboard, """
# [KEYWORD] _paste
# Feature - Display clipboard text.
# e.g. _paste:::"""),
"_mastercontrol": (self.openMasterControl, """
# [KEYWORD] _mastercontrol
# Usage: _mastercontrol:::
# Usage: _mastercontrol:::[0-4]"""),
"_highlight": (self.highlightVerse, """
# [KEYWORD] _highlight
# Feature - Highlight a verse
# Usage - _highlight:::[code]:::[BIBLE_REFERENCE(S)]
# Examples:
# e.g. _highlight:::hl1:::John 3:16
# e.g. _highlight:::hl2:::John 3:16
# e.g. _highlight:::ul1:::John 3:16
# e.g. _highlight:::delete:::John 3:16"""),
"_savepdfcurrentpage": (self.savePdfCurrentPage, """
# [KEYWORD] _savePdfCurrentPage
# Feature - Save the current page of PDF
# Usage - _savePdfCurrentPage:::[page]
# Example:
# e.g. _savePdfCurrentPage:::100"""),
"_setconfig": (self.textSetConfig, """
# [KEYWORD] _setconfig
# Feature - Set a config value in config.py.
# Usage - _setconfig:::[item]:::[value]
# WARNING! Do NOT use this command unless you are familiar with config.py. A mistake can prevent UBA from starting up.
# Remarks: This command works ONLY when config.developer or config.webFullAccess is set to True.
# Remarks: All configurable settings are displayed if both item and value are not provided.
# Remarks: Help content about an item is displayed if the item is provided without a value.
# Remarks: Use single quotation mark ' for string value.
# Example:
# e.g. _setconfig:::
# e.g. _setconfig:::favouriteBible
# e.g. _setconfig:::favouriteBible:::'BSB'"""),
"_fixlinksincommentary": (self.fixLinksInCommentary, """
# Usage - _fixlinksincommentary:::[commentary]
# Example:
# _fixlinksincommentary:::Dakes
"""),
"_copy": (self.copyText, """
# Usage - _copy:::[text]
# Remarks: This command works only on the desktop or webtop version.
# Example:
# _copy:::Unique Bible App
"""),
"_whatis": (self.textWhatIs, """
# [KEYWORD] _whatis
# Feature - Display brief description about a command keyword
# Usage - _whatis:::[command_keyword]
# e.g. _whatis:::bible
# e.g. _whatis:::read"""),
}
for key, value in BibleBooks.abbrev["eng"].items():
book = value[0]
self.interpreters[book.lower()] = (partial(self.textSearchSingleBook, key), """
# [KEYWORD] {0}
# Feature - Search '{0}' ONLY
# Usage - {0}:::[BIBLE_VERSION(S)]:::[LOOK_UP_STRING]
# e.g. {0}:::KJV:::love""".format(book))
def parser(self, textCommand, source="main"):
commandList = self.splitCommand(textCommand)
updateViewConfig, viewText, *_ = self.getViewConfig(source)
if len(commandList) == 1:
textCommand = textCommand.strip()
if self.isDatabaseInstalled("bible"):
self.lastKeyword = "bible"
return self.textBibleVerseParser(textCommand, viewText, source)
else:
return self.databaseNotInstalled("bible")
else:
keyword, command = commandList
keyword = keyword.lower()
if keyword in ("bible", "study", "text") and config.runMode == "terminal":
config.terminalBibleComparison = False
if config.runMode == "terminal" and keyword in config.mainWindow.unsupportedCommands:
return ("study", f"{keyword}::: command is currently not supported in terminal mode.", {})
if keyword in self.interpreters:
if self.isDatabaseInstalled(keyword):
command = command.strip()
if not command:
currentBibleReference = self.bcvToVerseReference(config.mainB, config.mainC, config.mainV)
if keyword in ("bible", "study", "compare", "crossreference", "diff", "difference", "tske", "translation", "discourse", "words", "combo", "commentary", "index", "openversenote", "displaywordfrequency"):
command = currentBibleReference
print(f"Running '{keyword}:::{command}' ...")
elif keyword in ("openbooknote",):
command = re.sub(" [0-9]+?:[0-9]+?$", "", currentBibleReference)
print(f"Running '{keyword}:::{command}' ...")
elif keyword in ("openchapternote", "overview", "summary", "chapterindex"):
command = currentBibleReference.split(":", 1)[0]
print(f"Running '{keyword}:::{command}' ...")
elif not keyword in ("_mastercontrol", "_paste", "_commentaries", "commentary2", "_comparison", "_menu", "import", "_setconfig", "openjournal", "editjournal", "searchjournal", "searchbooknote", "searchchapternote", "searchversenote", "_openbooknote", "_openchapternote", "_openversenote", "_editbooknote", "_editchapternote", "_editversenote", "_vnsc", "_vndc"):
return self.textWhatIs(keyword, source)
self.lastKeyword = keyword
return self.interpreters[keyword][0](command, source)
else:
return self.databaseNotInstalled(keyword)
else:
if self.isDatabaseInstalled("bible"):
self.lastKeyword = "bible"
return self.textBibleVerseParser(textCommand, viewText, source)
else:
return self.databaseNotInstalled("bible")
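# Illustrative sketch of the routing above (hedged, not an exhaustive description):
# a command such as "BIBLE:::KJV:::John 3:16" is split into the keyword "bible" (lower-cased)
# and the remaining command "KJV:::John 3:16", then dispatched via self.interpreters["bible"][0](command, source)
# if that keyword is registered; a command without ":::" (e.g. "John 3:16") falls through to textBibleVerseParser instead.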
# check if a particular database is installed
def databaseInfo(self):
return {
"_menu": self.getCoreBiblesInfo(),
"_instantverse": self.getCoreBiblesInfo(),
"_instantword": self.getCoreBiblesInfo(),
"_bibleinfo": self.getCoreBiblesInfo(),
"main": self.getCoreBiblesInfo(),
"study": self.getCoreBiblesInfo(),
"bible": self.getCoreBiblesInfo(),
"text": self.getCoreBiblesInfo(),
"compare": self.getCoreBiblesInfo(),
"parallel": self.getCoreBiblesInfo(),
"passages": self.getCoreBiblesInfo(),
"diff": self.getCoreBiblesInfo(),
"difference": self.getCoreBiblesInfo(),
"count": self.getCoreBiblesInfo(),
"search": self.getCoreBiblesInfo(),
"semantic": self.getCoreBiblesInfo(),
"gptsearch": self.getCoreBiblesInfo(),
"advancedsearch": self.getCoreBiblesInfo(),
"andsearch": self.getCoreBiblesInfo(),
"orsearch": self.getCoreBiblesInfo(),
"lemma": self.getCoreBiblesInfo(),
"morphologycode": self.getCoreBiblesInfo(),
"morphology": self.getCoreBiblesInfo(),
"searchmorphology": self.getCoreBiblesInfo(),
"_commentary": self.getLastCommentaryInfo(),
"commentary": self.getLastCommentaryInfo(),
"_openchapternote": self.getBibleNoteInfo(),
"_openversenote": self.getBibleNoteInfo(),
"_editchapternote": self.getBibleNoteInfo(),
"_editversenote": self.getBibleNoteInfo(),
"searchchapternote": self.getBibleNoteInfo(),
"searchversenote": self.getBibleNoteInfo(),
"overview": self.getCollectionsInfo(),
"summary": ((config.marvelData, "commentaries", "cBrooks.commentary"), "1pZNRYE6LqnmfjUem4Wb_U9mZ7doREYUm"),
"_harmony": self.getCollectionsInfo(),
"_promise": self.getCollectionsInfo(),
"_book": self.getBookInfo(),
"book": self.getBookInfo(),
"searchbook": self.getBookInfo(),
"searchbookchapter": self.getBookInfo(),
"crossreference": self.getXRefInfo(),
"tske": self.getXRefInfo(),
"_image": ((config.marvelData, "images.sqlite"), "1_fo1CzhzT6h0fEHS_6R0JGDjf9uLJd3r"),
"index": ((config.marvelData, "indexes2.sqlite"), "1hY-QkBWQ8UpkeqM8lkB6q_FbaneU_Tg5"),
"chapterindex": ((config.marvelData, "indexes2.sqlite"), "1hY-QkBWQ8UpkeqM8lkB6q_FbaneU_Tg5"),
"searchtool": ((config.marvelData, "search.sqlite"), "1A4s8ewpxayrVXamiva2l1y1AinAcIKAh"),
"word": ((config.marvelData, "data", "wordNT.data"), "11pmVhecYEtklcB4fLjNP52eL9pkytFdS"),
"clause": ((config.marvelData, "data", "clauseNT.data"), "11pmVhecYEtklcB4fLjNP52eL9pkytFdS"),
"translation": ((config.marvelData, "data", "translationNT.data"), "11bANQQhH6acVujDXiPI4JuaenTFYTkZA"),
"discourse": ((config.marvelData, "data", "discourseNT.data"), "11bANQQhH6acVujDXiPI4JuaenTFYTkZA"),
"words": ((config.marvelData, "data", "wordsNT.data"), "11bANQQhH6acVujDXiPI4JuaenTFYTkZA"),
"combo": ((config.marvelData, "data", "wordsNT.data"), "11bANQQhH6acVujDXiPI4JuaenTFYTkZA"),
"lexicon": ((config.marvelData, "lexicons", "MCGED.lexicon"), "157Le0xw2ovuoF2v9Bf6qeck0o15RGfMM"),
"exlb": ((config.marvelData, "data", "exlb3.data"), "1gp2Unsab85Se-IB_tmvVZQ3JKGvXLyMP"),
"dictionary": ((config.marvelData, "data", "dictionary.data"), "1NfbkhaR-dtmT1_Aue34KypR3mfPtqCZn"),
"encyclopedia": ((config.marvelData, "data", "encyclopedia.data"), "1OuM6WxKfInDBULkzZDZFryUkU1BFtym8"),
}
def getCoreBiblesInfo(self):
return ((config.marvelData, "images.sqlite"), "1-aFEfnSiZSIjEPUQ2VIM75I4YRGIcy5-")
def getBibleNoteInfo(self):
return ((config.marvelData, "note.sqlite"), "1OcHrAXLS-OLDG5Q7br6mt2WYCedk8lnW")
def getCollectionsInfo(self):
return ((config.marvelData, "collections3.sqlite"), "18dRwEc3SL2Z6JxD1eI1Jm07oIpt9i205")
def getBookInfo(self):
return ((config.marvelData, "books", "Maps_ABS.book"), "13hf1NvhAjNXmRQn-Cpq4hY0E2XbEfmEd")
def getXRefInfo(self):
return ((config.marvelData, "cross-reference.sqlite"), "1fTf0L7l1k_o1Edt4KUDOzg5LGHtBS3w_")
def getLastCommentaryInfo(self):
return ((config.marvelData, "commentaries", "c{0}.commentary".format(config.commentaryText)), self.getCommentaryCloudID(config.commentaryText))
def getMarvelBibles(self):
return self.parent.bibleInfo
def getCommentaryCloudID(self, commentary):
cloudIDs = {
"Barnes": "13uxButnFH2NRUV-YuyRZYCeh1GzWqO5J",
"Benson": "1MSRUHGDilogk7_iZHVH5GWkPyf8edgjr",
"BI": "1DUATP_0M7SwBqsjf20YvUDblg3_sOt2F",
"Brooks": "1pZNRYE6LqnmfjUem4Wb_U9mZ7doREYUm",
"Calvin": "1FUZGK9n54aXvqMAi3-2OZDtRSz9iZh-j",
"CBSC": "1IxbscuAMZg6gQIjzMlVkLtJNDQ7IzTh6",
"CECNT": "1MpBx7z6xyJYISpW_7Dq-Uwv0rP8_Mi-r",
"CGrk": "1Jf51O0R911Il0V_SlacLQDNPaRjumsbD",
"CHP": "1dygf2mz6KN_ryDziNJEu47-OhH8jK_ff",
"Clarke": "1ZVpLAnlSmBaT10e5O7pljfziLUpyU4Dq",
"CPBST": "14zueTf0ioI-AKRo_8GK8PDRKael_kB1U",
"EBC": "1UA3tdZtIKQEx-xmXtM_SO1k8S8DKYm6r",
"ECER": "1sCJc5xuxqDDlmgSn2SFWTRbXnHSKXeh_",
"EGNT": "1ZvbWnuy2wwllt-s56FUfB2bS2_rZoiPx",
"GCT": "1vK53UO2rggdcfcDjH6mWXAdYti4UbzUt",
"Gill": "1O5jnHLsmoobkCypy9zJC-Sw_Ob-3pQ2t",
"Henry": "1m-8cM8uZPN-fLVcC-a9mhL3VXoYJ5Ku9",
"HH": "1RwKN1igd1RbN7phiJDiLPhqLXdgOR0Ms",
"ICCNT": "1QxrzeeZYc0-GNwqwdDe91H4j1hGSOG6t",
"JFB": "1NT02QxoLeY3Cj0uA_5142P5s64RkRlpO",
"KD": "1rFFDrdDMjImEwXkHkbh7-vX3g4kKUuGV",
"Lange": "1_PrTT71aQN5LJhbwabx-kjrA0vg-nvYY",
"MacL": "1p32F9MmQ2wigtUMdCU-biSrRZWrFLWJR",
"PHC": "1xTkY_YFyasN7Ks9me3uED1HpQnuYI8BW",
"Pulpit": "1briSh0oDhUX7QnW1g9oM3c4VWiThkWBG",
"Rob": "17VfPe4wsnEzSbxL5Madcyi_ubu3iYVkx",
"Spur": "1OVsqgHVAc_9wJBCcz6PjsNK5v9GfeNwp",
"Vincent": "1ZZNnCo5cSfUzjdEaEvZ8TcbYa4OKUsox",
"Wesley": "1rerXER1ZDn4e1uuavgFDaPDYus1V-tS5",
"Whedon": "1FPJUJOKodFKG8wsNAvcLLc75QbM5WO-9",
}
if commentary in cloudIDs:
return cloudIDs[commentary]
else:
return ""
def isDatabaseInstalled(self, keyword):
if keyword in self.databaseInfo():
fileItems = self.databaseInfo()[keyword][0]
if os.path.isfile(os.path.join(*fileItems)):
return True
else:
return False
else:
return True
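# Illustrative sketch: for the keyword "_image", databaseInfo() gives the path parts
# (config.marvelData, "images.sqlite"), so the check above effectively becomes
# os.path.isfile(os.path.join(config.marvelData, "images.sqlite")).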
def databaseNotInstalled(self, keyword):
databaseInfo = self.databaseInfo()[keyword]
self.parent.downloadHelper(databaseInfo)
return ("", "", {})
# return invalid command
def invalidCommand(self, source="main"):
if config.developer:
print(traceback.format_exc())
return (source, "INVALID_COMMAND_ENTERED", {})
# return no audio
def noAudio(self, source="main"):
return (source, "NO_AUDIO", {})
# return no Hebrew audio
def noHebrewAudio(self, source="main"):
return (source, "NO_HEBREW_AUDIO", {})
# return no Greek audio
def noGreekAudio(self, source="main"):
return (source, "NO_GREEK_AUDIO", {})
# sort out keywords from a single line command
def splitCommand(self, command):
commandList = re.split('[ ]*?:::[ ]*?', command, 1)
return commandList
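# Illustrative sketch: only the first ":::" (with optional surrounding spaces) is used as the separator, e.g.
# splitCommand("COMMENTARY:::CBSC:::John 3:16") -> ["COMMENTARY", "CBSC:::John 3:16"]
# splitCommand("John 3:16") -> ["John 3:16"] (no keyword present)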
# shared functions about config
def getViewConfig(self, view):
views = {
"main": (self.setMainVerse, config.mainText, self.bcvToVerseReference(config.mainB, config.mainC, config.mainV), config.mainB, config.mainC, config.mainV),
"study": (self.setStudyVerse, config.studyText, self.bcvToVerseReference(config.studyB, config.studyC, config.studyV), config.studyB, config.studyC, config.studyV),
"instant": (self.setMainVerse, config.mainText, self.bcvToVerseReference(config.mainB, config.mainC, config.mainV), config.mainB, config.mainC, config.mainV),
"cli": (self.setMainVerse, config.mainText, self.bcvToVerseReference(config.mainB, config.mainC, config.mainV), config.mainB, config.mainC, config.mainV),
"http": (self.setMainVerse, config.mainText, self.bcvToVerseReference(config.mainB, config.mainC, config.mainV), config.mainB, config.mainC, config.mainV),
}
return views[view]
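# Illustrative sketch: getViewConfig("main") returns a 6-item tuple
# (setter function, bible text, current reference string, book, chapter, verse);
# callers unpack only what they need, e.g. updateViewConfig, viewText, *_ = self.getViewConfig(source).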
def setMainVerse(self, text, bcvTuple):
config.mainText = text
config.mainB, config.mainC, config.mainV, *_ = bcvTuple
config.setMainVerse = True
self.parent.updateMainRefButton()
def setStudyVerse(self, text, bcvTuple):
config.studyText = text
config.studyB, config.studyC, config.studyV, *_ = bcvTuple
self.parent.updateStudyRefButton()
config.commentaryB, config.commentaryC, config.commentaryV, *_ = bcvTuple
self.parent.updateCommentaryRefButton()
def setCommentaryVerse(self, text, bcvTuple):
config.commentaryText = text
config.commentaryB, config.commentaryC, config.commentaryV, *_ = bcvTuple
self.parent.updateCommentaryRefButton()
config.studyB, config.studyC, config.studyV, *_ = bcvTuple
self.parent.updateStudyRefButton()
# shared functions about bible text
def getConfirmedTexts(self, texts, returnEmptyList=False):
biblesSqlite = BiblesSqlite()
bibleList = biblesSqlite.getBibleList()
confirmedTexts = [text for text in texts.split("_") if text in bibleList or text in self.getMarvelBibles()]
if not confirmedTexts and not returnEmptyList:
confirmedTexts = [config.favouriteBible]
#return sorted(list(set(confirmedTexts)))
confirmedTexts = list(set(confirmedTexts))
if config.mainText in confirmedTexts:
confirmedTexts.remove(config.mainText)
confirmedTexts = [config.mainText] + sorted(confirmedTexts)
return confirmedTexts
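# Illustrative sketch (assumed data): with installed bibles ["KJV", "NET"] and config.mainText == "KJV",
# getConfirmedTexts("NET_KJV_XYZ") drops the unknown "XYZ" and returns ["KJV", "NET"] with config.mainText first;
# if none of the requested versions are available, [config.favouriteBible] is returned instead.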
def extractAllVerses(self, text, tagged=False):
return BibleVerseParser(config.parserStandarisation).extractAllReferences(text, tagged)
def extractAllVersesFast(self, text):
return BibleVerseParser(config.parserStandarisation).extractAllReferencesFast(text)
def bcvToVerseReference(self, b, c, v):
return BibleVerseParser(config.parserStandarisation).bcvToVerseReference(b, c, v)
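# Illustrative sketch: bcvToVerseReference(43, 3, 16) is expected to return "John 3:16"
# (book 43 corresponds to John, as seen in examples such as "_vnsc:::KJV.43.3.16.John 3:16" above).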
def isTextInCompareTexts(self, text, compareTexts):
return True if text in compareTexts[:-3].split("_") else False
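# Illustrative sketch: compareTexts captured from history ends with ":::" (e.g. "KJV_NET:::"),
# so compareTexts[:-3] strips the trailing ":::" and the check becomes "KJV" in ["KJV", "NET"] -> True.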
def switchCompareView(self):
if self.parent.enforceCompareParallelButton:
self.parent.enforceCompareParallelButtonClicked()
else:
config.enforceCompareParallel = not config.enforceCompareParallel
# default function if no special keyword is specified
def textBibleVerseParser(self, command, text, view, parallel=False):
if config.enforceCompareParallel and not parallel:
compareMatches = re.match("^[Cc][Oo][Mm][Pp][Aa][Rr][Ee]:::(.*?:::)", config.history["main"][-1])
if view in ("main", "http") and compareMatches:
compareTexts = compareMatches.group(1)
if self.isTextInCompareTexts(text, compareTexts):
config.tempRecord = "COMPARE:::{0}{1}".format(compareTexts, command)
return self.textCompare("{0}{1}".format(compareTexts, command), view)
else:
self.switchCompareView()
parallelMatches = re.match("^[Pp][Aa][Rr][Aa][Ll][Ll][Ee][Ll]:::(.*?:::)", config.history["main"][-1])
if view in ("main", "http") and parallelMatches:
compareTexts = parallelMatches.group(1)
if self.isTextInCompareTexts(text, compareTexts):
config.tempRecord = "PARALLEL:::{0}{1}".format(compareTexts, command)
return self.textParallel("{0}{1}".format(compareTexts, command), view)
else:
self.switchCompareView()
compareSideBySideMatches = re.match("^[Ss][Ii][Dd][Ee][Bb][Yy][Ss][Ii][Dd][Ee]:::(.*?:::)", config.history["main"][-1])
if view in ("main", "http") and compareSideBySideMatches:
compareTexts = compareSideBySideMatches.group(1)
if self.isTextInCompareTexts(text, compareTexts):
config.tempRecord = "SIDEBYSIDE:::{0}{1}".format(compareTexts, command)
return self.textCompareSideBySide("{0}{1}".format(compareTexts, command), view)
else:
self.switchCompareView()
# Direct to bible search when there is no valid reference.
# Use the latest search mode for bible search.
# Qt library users can change bible search mode via master control
# Terminal mode users can change default search mode via ".changebiblesearchmode"
searchModes = ("COUNT", "SEARCH", "ANDSEARCH", "ORSEARCH", "ADVANCEDSEARCH", "REGEXSEARCH", "GPTSEARCH", "SEMANTIC")
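# Illustrative sketch (assumed values): with config.bibleSearchMode == 1 and config.mainText == "KJV",
# entering "love" without a valid verse reference is rewritten below to "SEARCH:::KJV:::love"
# and fed back into self.parser().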
if config.useLiteVerseParsing:
verseList = self.extractAllVersesFast(command)
if verseList[0][0] == 0:
command = re.sub(r" \d+:?\d?$", "", command)
command = f"{searchModes[config.bibleSearchMode]}:::{config.mainText}:::{command}"
return self.parser(command, view)
else:
verseList = self.extractAllVerses(command)
if not verseList:
command = f"{searchModes[config.bibleSearchMode]}:::{config.mainText}:::{command}"
return self.parser(command, view)
else:
formattedBiblesFolder = os.path.join(config.marvelData, "bibles")
formattedBibles = [f[:-6] for f in os.listdir(formattedBiblesFolder) if os.path.isfile(os.path.join(formattedBiblesFolder, f)) and f.endswith(".bible") and not re.search(r"^[\._]", f)]
if text in ("MOB", "MIB", "MTB", "MPB", "MAB", "LXX1i", "LXX2i", "LXX1", "LXX2") and not config.readFormattedBibles:
config.readFormattedBibles = True
self.parent.enableParagraphButtonAction(False)
elif config.readFormattedBibles and (((text in ("OHGBi", "OHGB") or not text in formattedBibles) and view == "main") or text == "LXX"):
config.readFormattedBibles = False
self.parent.enableParagraphButtonAction(False)
# Custom font styling for Bible
(fontFile, fontSize, css) = Bible(text).getFontInfo()
if view == "main":
config.mainCssBibleFontStyle = css
elif view == "study":
config.studyCssBibleFontStyle = css
if (len(verseList) == 1) and (len(verseList[0]) == 3):
compareParallelList = "_".join(config.compareParallelList)
if config.runMode == "terminal" and config.terminalBibleParallels:
return self.textCompareSideBySide(f"{compareParallelList}:::{command}", view)
elif config.runMode == "terminal" and config.terminalBibleComparison:
return self.textCompare(f"{compareParallelList}:::{command}", view)
# i.e. only one verse reference is specified
bcvTuple = verseList[0]
# Force chapter and verse to 1 if the chapter is 0 (e.g. when viewing a commentary introduction)
if bcvTuple[1] == 0:
bcvTuple = (bcvTuple[0], 1, 1)
chapters = self.getChaptersMenu(bcvTuple[0], bcvTuple[1], text) if config.displayChapterMenuTogetherWithBibleChapter else ""
content = "{0}{2}{1}{2}{0}".format(chapters, self.textFormattedBible(bcvTuple, text, view), "<hr>" if config.displayChapterMenuTogetherWithBibleChapter else "")
else:
# i.e. when more than one verse reference is found
content = self.textPlainBible(verseList, text)
bcvTuple = verseList[-1]
content = self.toggleBibleText(content)
# Add text tag for custom font styling
content = "<bibletext class='{0}'>{1}</bibletext>".format(text, content)
config.eventContent = content
PluginEventHandler.handleEvent("post_parse_bible", command)
content = config.eventContent
if config.openBibleInMainViewOnly:
self.setMainVerse(text, bcvTuple)
#self.setStudyVerse(text, bcvTuple)
return ("main", content, {})
else:
updateViewConfig, *_ = self.getViewConfig(view)
updateViewConfig(text, bcvTuple)
return (view, content, {'tab_title': text})
def toggleBibleText(self, text):
# The following line does not work when config.displayChapterMenuTogetherWithBibleChapter is set to True.
#isMarvelBibles = True if re.search("_instantVerse:::(MOB|MIB|MPB|MTB|MAB|OHGB|OHGBi):::", text) else False
# use the following line instead
isMarvelBibles = True if re.search("_chapters:::(MOB|MIB|MPB|MTB|MAB|OHGB|OHGBi)_", text) else False
# The following line does not work when config.displayChapterMenuTogetherWithBibleChapter is set to True.
#isMIB = ("_instantVerse:::MIB:::" in text)
# use the following line instead
isMIB = ("_chapters:::MIB_" in text)
if (config.showHebrewGreekWordAudioLinks and isMarvelBibles) or (config.showHebrewGreekWordAudioLinksInMIB and isMIB):
text = re.sub("(<pm>|</pm>|<n>|</n>)", "", text)
text = re.sub("""(<heb id="wh)([0-9]+?)("[^<>]*?onclick="luW\()([0-9]+?)(,[^<>]*?>[^<>]+?</heb>[ ]*)""", r"""\1\2\3\4\5 <ref onclick="wah(\4,\2)">{0}</ref>""".format(config.audioBibleIcon), text)
text = re.sub("""(<grk id="w[0]*?)([1-9]+[0-9]*?)("[^<>]*?onclick="luW\()([0-9]+?)(,[^<>]*?>[^<>]+?</grk>[ ]*)""", r"""\1\2\3\4\5 <ref onclick="wag(\4,\2)">{0}</ref>""".format(config.audioBibleIcon), text)
if isMIB:
text = text.replace(config.audioBibleIcon, "***")
text = re.sub("""([ ]*<ref onclick="wa[gh])(\([0-9]+?,[0-9]+?\)">[^<>]+?</ref>)(.*?</wform>.*?<wlex>.*?</wlex></ref>)""", r"\1\2\3\1l\2", text)
text = text.replace("***", config.audioBibleIcon)
if not config.showVerseReference:
text = re.sub('<vid .*?>.*?</vid>', '', text)
if not config.showBibleNoteIndicator:
text = re.sub("<sup><ref onclick='bn\([^\(\)]*?\)'>⊕</ref></sup>", '', text)
if config.hideLexicalEntryInBible and re.search("onclick=['{0}]lex".format('"'), text):
p = re.compile(r"<[^\n<>]+?onclick='lex\({0}([^\n<>]+?){0}\)'>[^\n<>]+?</[^\n<>]+?>[ ]*?<[^\n<>]+?onclick='lex\({0}([^\n<>]+?){0}\)'>[^\n<>]+?</[^\n<>]+?>".format('"'))
while p.search(text):
text = p.sub(r"<ref onclick='lex({0}\1_\2{0})'>*</ref>".format('"'), text)
p = re.compile(r"<[^\n<>]+?onclick='rmac\({0}([^\n<>]+?){0}\)'>[^\n<>]+?</[^\n<>]+?>[ ]*?<[^\n<>]+?onclick='rmac\({0}([^\n<>]+?){0}\)'>[^\n<>]+?</[^\n<>]+?>".format('"'))
while p.search(text):
text = p.sub(r"<ref onclick='rmac({0}\1_\2{0})'>*</ref>".format('"'), text)
searchReplace = {
(r"<sup><[^\n<>]+?onclick='lex\({0}([^\n<>]+?){0}\)'>[^\n<>]+?</[^\n<>]+?> <[^\n<>]+?onclick='rmac\({0}([^\n<>]+?){0}\)'>[^\n<>]+?</[^\n<>]+?></sup>".format('"'), r"<sup><ref onclick='lmCombo({0}\1{0}, {0}rmac{0}, {0}\2{0})'>*</ref></sup>".format('"')),
(r"<sup><[^\n<>]+?onclick='lex\({0}([^\n<>]+?){0}\)'>[^\n<>]+?</[^\n<>]+?></sup>".format('"'), r"<sup><ref onclick='lex({0}\1{0})'>*</ref></sup>".format('"')),
}
for search, replace in searchReplace:
text = re.sub(search, replace, text)
p = re.compile(r"(<sup><ref onclick='bn\([^\n\(\)]*?\)'>⊕</ref></sup>|<woj>⸃</woj>|</woj>|</i>|</ot>|</mbe>|</mbn>)(<sup><ref onclick='l[^\r<>]*?>\*</ref></sup>)")
while p.search(text):
text = p.sub(r"\2\1", text)
p = re.compile(r"([^\n<>]+?)<sup><ref (onclick='l[^\r<>]*?>)\*</ref></sup>")
while p.search(text):
text = p.sub(r"<tag \2\1</tag>", text)
return text
def getChaptersMenu(self, b, c, text):
biblesSqlite = BiblesSqlite()
chaptersMenu = biblesSqlite.getChaptersMenu(b, c, text)
return chaptersMenu
# access to formatted chapter or plain verses of a bible text, called by textBibleVerseParser
def textPlainBible(self, verseList, text):
biblesSqlite = BiblesSqlite()
verses = biblesSqlite.readMultipleVerses(text, verseList)
return verses
def textFormattedBible(self, verse, text, source=""):
formattedBiblesFolder = os.path.join(config.marvelData, "bibles")
formattedBibles = [f[:-6] for f in os.listdir(formattedBiblesFolder) if os.path.isfile(os.path.join(formattedBiblesFolder, f)) and f.endswith(".bible") and not re.search(r"^[\._]", f)]
#marvelBibles = ("MOB", "MIB", "MAB", "MPB", "MTB", "LXX1", "LXX1i", "LXX2", "LXX2i")
#marvelBibles = list(self.getMarvelBibles().keys())
# bibleSqlite = Bible(text)
#if source in ("cli"):
# b, c, v, *_ = verse
# bibleSqlite = Bible(text)
# b, c, v, content = bibleSqlite.readTextVerse(b, c, v)
bibleSqlite = Bible(text)
if text in formattedBibles and text not in ("OHGB", "OHGBi", "LXX") and config.readFormattedBibles:
content = bibleSqlite.readFormattedChapter(verse, source)
else:
# use plain bibles database when corresponding formatted version is not available
language = bibleSqlite.getLanguage()
content = BiblesSqlite(language).readPlainChapter(text, verse, source)
if config.runMode == "terminal":
if config.terminalEnablePager:
singleVerse = self.textPlainBible([verse], text)
content = f"{singleVerse}<br>{config.mainWindow.divider}<br>{content}"
else:
singleVerse = self.textPlainBible([verse], text)
content = f"{content}<br>{config.mainWindow.divider}<br>{singleVerse}"
return content
# cmd:::
# run os command
def osCommand(self, command, source):
window = ""
display = ""
if (config.runMode == "docker"):
WebtopUtil.run(command)
elif config.runMode == "http-server" and not config.enableCmd:
print("Command keyword CMD::: is not enabled for security reason. To enable it, set 'enableCmd = True' in file 'config.py'.")
else:
runCmd = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = runCmd.communicate()
output = stdout.decode("utf-8").replace("\n", "<br>")
error = stderr.decode("utf-8").replace("\n", "<br>")
if config.displayCmdOutput:
window = "study"
display = "<h2>Output</h2><p>{0}</p><h2>Error</h2><p>{1}</p>".format(output if output else "[no output]", error if error else "[no error]")
#if platform.system() == "Linux":
#subprocess.Popen([command], shell=True, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
#else:
#os.system(command)
return (window, display, {})
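# Illustrative sketch: "cmd:::echo hello" runs the shell command above, captures stdout/stderr,
# and, if config.displayCmdOutput is True, shows something like "<h2>Output</h2><p>hello<br></p>..." on the study window.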
# check if espeak is installed.
def isEspeakInstalled(self):
espeakInstalled, _ = subprocess.Popen("which espeak", shell=True, stdout=subprocess.PIPE).communicate()
if espeakInstalled:
return True
else:
return False
# check if a command is installed.
def isCommandInstalled(self, command):
commandInstalled, _ = subprocess.Popen("which {0}".format(command), shell=True, stdout=subprocess.PIPE).communicate()
if commandInstalled:
return True
else:
return False
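# Illustrative sketch: isCommandInstalled("ffmpeg") runs "which ffmpeg" and returns True
# only if that command prints a path, i.e. the tool is found on PATH.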
# gtts:::
# run google text to speech feature
# internet is required
def googleTextToSpeech(self, command, source):
# Stop current playing first if any:
self.stopTtsAudio()
# Language codes: https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes
if command.count(":::") != 0:
language, text = self.splitCommand(command)
else:
language = "en-GB" if config.isGoogleCloudTTSAvailable else "en"
text = command
# fine-tune
text, language = self.parent.fineTuneGtts(text, language)
try:
if config.runMode == "terminal" and config.terminalEnableTermuxAPI:
# Option 1
config.mainWindow.createAudioPlayingFile()
text = re.sub("(\. |。)", r"\1*", text)
for i in text.split("*"):
if not os.path.isfile(config.audio_playing_file):
break
print(i)
pydoc.pipepager(i, cmd=f"termux-tts-speak -l {language} -r {config.terminalTermuxttsSpeed}")
config.mainWindow.removeAudioPlayingFile()
# Output file shared by option 2 and option 3
#outputFile = os.path.join("terminal_history", "gtts")
#with open(outputFile, "w", encoding="utf-8") as f:
# f.write(text)
#command = f"cat {outputFile} | termux-tts-speak -l {language} -r {config.terminalTermuxttsSpeed}"
# Option 2
#WebtopUtil.run(command)
# Option 3
#config.cliTtsProcess = subprocess.Popen([command], shell=True, preexec_fn=os.setpgrp, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
# Define default tts language
config.ttsDefaultLangauge = language
return ("", "", {})
else:
if config.isGoogleCloudTTSAvailable:
self.parent.saveCloudTTSAudio(text, language)
else:
self.parent.saveGTTSAudio(text, language)
audioFile = self.parent.getGttsFilename()
if os.path.isfile(audioFile):
self.openMediaPlayer(audioFile, "main", gui=False)
except:
if config.developer:
print(traceback.format_exc())
else:
self.parent.displayMessage(config.thisTranslation["message_fail"])
# Keep the following code for future reference
# The following method does not work on Windows
# if not platform.system() == "Windows" and ("Gtts" in config.enabled):
# if not self.isCommandInstalled("gtts-cli"):
# installmodule("gTTS")
# if self.isCommandInstalled("gtts-cli") and self.isCommandInstalled("play"):
# command = "gtts-cli '{0}' --lang {1} --nocheck | play -t mp3 -".format(text, language)
# print(command)
# self.cliTtsProcess = subprocess.Popen([command], shell=True, preexec_fn=os.setpgrp, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
# elif self.isCommandInstalled("gtts-cli") and not self.isCommandInstalled("play"):
# message = "Install sox FIRST! \nFor examples, run: \non macOS, 'brew install sox' \non Ubuntu / Debian, 'sudo apt install sox' \non Arch Linux, 'sudo pacman -S sox'"
# self.parent.displayMessage(message)
# elif not self.isCommandInstalled("gtts-cli") and not self.isCommandInstalled("play"):
# message = "Install gTTS and sox FIRST! \nFor example, on Arch Linux, run:\n'pip3 install gTSS' and \n'sudo pacman -S sox'"
# self.parent.displayMessage(message)
return ("", "", {})
# speak:::
# run text to speech feature
def textToSpeech(self, command, source):
if config.forceOnlineTts:
return self.googleTextToSpeech(command, source)
# Stop current playing first if any:
self.stopTtsAudio()
# Language codes: https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes
language = config.ttsDefaultLangauge
text = command
if command.count(":::") != 0:
language, text = self.splitCommand(command)
if language.startswith("["):
if language in config.macVoices:
# save a text file first to avoid issues with quotation marks in the text
if language.startswith("[el_GR]"):
text = TextUtil.removeVowelAccent(text)
with open('temp/temp.txt', 'w') as file:
file.write(text)
voice = re.sub("^\[.*?\] ", "", language)
# The following does not support "stop" feature
#WebtopUtil.run(f"say -v {voice} -f temp/temp.txt")
command = f"say -r {config.macOSttsSpeed} -v {voice} -f temp/temp.txt"
self.cliTtsProcess = subprocess.Popen([command], shell=True, preexec_fn=os.setpgrp, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
else:
self.parent.displayMessage(config.thisTranslation["message_noTtsVoice"])
else:
# espeak has no support for "ko"; "ko" here is used to correct detection of traditional Chinese
# It is not recommended to use "ko" to correct language detection for "zh-tw", if qt built-in tts engine is used.
# Different from espeak, Qt text-to-speech has a qlocale on Korean.
# If the following two lines are uncommented, Korean text cannot be read.
# In case the language is wrongly detected, users can still use command line to specify a correct language.
if (config.espeak) and (language == "ko"):
language = "zh-tw"
if (language == "zh-cn") or (language == "zh-tw"):
if config.ttsChineseAlwaysCantonese:
language = "zh-tw"
elif config.ttsChineseAlwaysMandarin:
language = "zh-cn"
elif (language == "en") or (language == "en-gb"):
if config.ttsEnglishAlwaysUS:
language = "en"
elif config.ttsEnglishAlwaysUK:
language = "en-gb"
elif (language == "el"):
# Modern Greek
#language = "el"
# Ancient Greek
# To read accented Greek text, the language has to be "grc" instead of "el" for espeak
# In dictionary mapping language to qlocale, we use "grc" for Greek language too.
language = "grc"
elif (config.espeak) and (language == "he"):
# espeak itself does not support the Hebrew language
# Below workaround on Hebrew text-to-speech feature for espeak
# Please note this workaround is not a perfect solution, but something workable.
text = HebrewTransliteration().transliterateHebrew(text)
# Use "grc" to read, becuase it sounds closer to "he" than "en" does.
language = "grc"
if platform.system() == "Linux" and config.espeak:
if WebtopUtil.isPackageInstalled("espeak"):
isoLang2epeakLang = TtsLanguages().isoLang2epeakLang
languages = isoLang2epeakLang.keys()
if not (config.ttsDefaultLangauge in languages):
config.ttsDefaultLangauge = "en"
if not (language in languages):
if config.runMode == "terminal":
print(f"'{language}' is not found!")
print("Available languages:", languages)
else:
self.parent.displayMessage(config.thisTranslation["message_noTtsVoice"])
language = config.ttsDefaultLangauge
print(f"Language changed to '{language}'")
language = isoLang2epeakLang[language][0]
# subprocess is used
WebtopUtil.run("espeak -s {0} -v {1} '{2}'".format(config.espeakSpeed, language, text))
else:
self.parent.displayMessage(config.thisTranslation["message_noEspeak"])
else:
# use qt built-in tts engine
engineNames = QTextToSpeech.availableEngines()
if engineNames:
self.qtTtsEngine = QTextToSpeech(engineNames[0])
#locales = self.qtTtsEngine.availableLocales()
#print(locales)
isoLang2qlocaleLang = TtsLanguages().isoLang2qlocaleLang
languages = TtsLanguages().isoLang2qlocaleLang.keys()
if not (config.ttsDefaultLangauge in languages):
config.ttsDefaultLangauge = "en"
if not (language in languages):
self.parent.displayMessage(config.thisTranslation["message_noTtsVoice"])
language = config.ttsDefaultLangauge
self.qtTtsEngine.setLocale(isoLang2qlocaleLang[language][0])
self.qtTtsEngine.setVolume(1.0)
engineVoices = self.qtTtsEngine.availableVoices()
if engineVoices:
self.qtTtsEngine.setVoice(engineVoices[0])
# Control speed here
self.qtTtsEngine.setRate(config.qttsSpeed)
self.qtTtsEngine.say(text)
else:
self.parent.displayMessage(config.thisTranslation["message_noTtsVoice"])
return ("", "", {})
def stopTtsAudio(self):
self.parent.closeMediaPlayer()
# if self.cliTtsProcess is not None:
# #print(self.cliTtsProcess)
# # The following two lines do not work:
# #self.cliTtsProcess.kill()
# #self.cliTtsProcess.terminate()
# # Therefore, we use:
# try:
# os.killpg(os.getpgid(self.cliTtsProcess.pid), signal.SIGTERM)
# except:
# pass
# self.cliTtsProcess = None
# elif self.qtTtsEngine is not None:
# self.qtTtsEngine.stop()
def terminalDownloadYoutubeFile(self, downloadCommand, command, outputFolder):
if self.isFfmpegInstalled():
try:
print(config.mainWindow.divider)
#print("Downloading ...")
#subprocess.run(["cd {2}; {0} {1}".format(downloadCommand, command, outputFolder)], shell=True, check=True, stdout=subprocess.PIPE, stderr=subprocess.DEVNULL)
# use os.system instead, as it displays download status ...
os.system("cd {2}; {0} {1}".format(downloadCommand, command, outputFolder))
if WebtopUtil.isPackageInstalled("pkill"):
os.system("pkill yt-dlp")
print(f"Downloaded in directory '{outputFolder}'!")
except:
print("Errors!")
#except subprocess.CalledProcessError as err:
#config.mainWindow.displayMessage(err, title="ERROR:")
else:
print("Tool 'ffmpeg' is not found on your system!")
return ("", "", {})
# mp3:::
def mp3Download(self, command, source):
downloadCommand = "yt-dlp -x --audio-format mp3"
if config.runMode == "terminal":
return self.terminalDownloadYoutubeFile(downloadCommand, command, config.musicFolder)
else:
self.downloadYouTubeFile(downloadCommand, command, config.musicFolder)
"""
if not platform.system() == "Linux":
# version 1: known issue - the download process blocks the main window
self.downloadYouTubeFile(downloadCommand, command, config.musicFolder)
else:
# version 2: known issue - only works on Linux, but not macOS or Windows
multiprocessing.Process(target=self.downloadYouTubeFile, args=(downloadCommand, command, config.musicFolder)).start()
self.parent.displayMessage(config.thisTranslation["downloading"])
"""
#self.parent.reloadResources()
return ("", "", {})
# mp4:::
def mp4Download(self, command, source):
downloadCommand = "yt-dlp -f bestvideo[ext=mp4]+bestaudio[ext=m4a]/mp4"
if config.runMode == "terminal":
return self.terminalDownloadYoutubeFile(downloadCommand, command, config.videoFolder)
else:
self.downloadYouTubeFile(downloadCommand, command, config.videoFolder)
"""
if not platform.system() == "Linux":
# version 1: known issue - the download process blocks the main window
self.downloadYouTubeFile(downloadCommand, command, config.videoFolder)
else:
# version 2: known issue - only works on Linux, but not macOS or Windows
multiprocessing.Process(target=self.downloadYouTubeFile, args=(downloadCommand, command, config.videoFolder)).start()
self.parent.displayMessage(config.thisTranslation["downloading"])
"""
#self.parent.reloadResources()
return ("", "", {})
def youtubeDownload(self, downloadCommand, youTubeLink):
self.downloadYouTubeFile(downloadCommand, youTubeLink, config.videoFolder)
"""
if not platform.system() == "Linux":
# version 1: known issue - the download process blocks the main window
self.downloadYouTubeFile(downloadCommand, youTubeLink, config.videoFolder)
else:
# version 2: known issue - only works on Linux, but not macOS or Windows
multiprocessing.Process(target=self.downloadYouTubeFile, args=(downloadCommand, youTubeLink, config.videoFolder)).start()
self.parent.displayMessage(config.thisTranslation["downloading"])
"""
#self.parent.reloadResources()
def isFfmpegInstalled(self):
ffmpegVersion = subprocess.Popen("ffmpeg -version", shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
*_, stderr = ffmpegVersion.communicate()
return False if stderr else True
def getYouTubeDownloadOptions(self, url):
options = subprocess.Popen("yt-dlp --list-formats {0}".format(url), shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, *_ = options.communicate()
options = stdout.decode("utf-8").split("\n")
return [option for option in options if re.search(r"^[0-9]+? ", option)]
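    # Illustrative sketch (assumptions, not from the original source): "yt-dlp --list-formats <url>" prints a
    # table whose data rows typically begin with a numeric format ID, e.g. "140 m4a audio only ..." or
    # "137 mp4 1920x1080 ...", so the regex r"^[0-9]+? " above keeps only those rows and drops headers and blank lines.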
def downloadYouTubeFile(self, downloadCommand, youTubeLink, outputFolder, noFfmpeg=False):
# Download / upgrade to the latest version
if not hasattr(config, "youtubeDlIsUpdated") or (hasattr(config, "youtubeDlIsUpdated") and not config.youtubeDlIsUpdated):
installmodule("--upgrade yt-dlp")
config.youtubeDlIsUpdated = True
        if self.isFfmpegInstalled() or noFfmpeg:
if config.runMode in ("", "cli", "gui"):
self.parent.workOnDownloadYouTubeFile(downloadCommand, youTubeLink, outputFolder)
"""
elif platform.system() == "Linux":
try:
subprocess.run(["cd {2}; {0} {1}".format(downloadCommand, youTubeLink, outputFolder)], shell=True, check=True, stdout=subprocess.DEVNULL, stderr=subprocess.STDOUT)
subprocess.Popen([config.open, outputFolder])
except subprocess.CalledProcessError as err:
self.parent.displayMessage(err, title="ERROR:")
# on Windows
elif platform.system() == "Windows":
try:
os.system(r"cd .\{2}\ & {0} {1}".format(downloadCommand, youTubeLink, outputFolder))
os.system(r"{0} {1}".format(config.open, outputFolder))
except:
self.parent.displayMessage(config.thisTranslation["noSupportedUrlFormat"], title="ERROR:")
# on Unix-based system, like macOS
else:
try:
os.system(r"cd {2}; {0} {1}".format(downloadCommand, youTubeLink, outputFolder))
if (config.runMode == "docker"):
WebtopUtil.openDir(dir)
else:
os.system(r"{0} {1}".format(config.open, outputFolder))
except:
self.parent.displayMessage(config.thisTranslation["noSupportedUrlFormat"], title="ERROR:")
"""
else:
self.parent.displayMessage(config.thisTranslation["ffmpegNotFound"])
wikiPage = "https://github.com/eliranwong/UniqueBible/wiki/Install-ffmpeg"
if config.enableHttpServer:
subprocess.Popen("{0} {1}".format(config.open, wikiPage), shell=True)
else:
webbrowser.open(wikiPage)
def keyToStopStreaming(self, playback_event):
async def readKeys() -> None:
done = False
input = create_input()
def keys_ready():
nonlocal done
for key_press in input.read_keys():
#print(key_press)
if key_press.key in (Keys.ControlQ, Keys.ControlZ):
print("\nStopping audio playback ...")
self.parent.closeMediaPlayer()
done = True
playback_event.set()
with input.raw_mode():
with input.attach(keys_ready):
while not done:
if config.playback_finished:
break
await asyncio.sleep(0.1)
asyncio.run(readKeys())
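    # Note (summary of the coroutine above; create_input() and Keys appear to come from prompt_toolkit,
    # which is an assumption based on the call pattern): the terminal is polled in raw mode and the playback
    # thread is signalled to stop when Ctrl+Q or Ctrl+Z is pressed, or when playback finishes on its own.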
# READSYNC:::
def textReadSync(self, command, source):
return self.textRead(command, source, True) if config.runMode == "terminal" else ("study", "Currently, only terminal mode supports running READSYNC::: command.", {})
# READ:::
def textRead(self, command, source, displayText=False):
if command.count(":::") == 0:
updateViewConfig, viewText, *_ = self.getViewConfig(source)
command = "{0}:::{1}".format(viewText, command)
texts, references = self.splitCommand(command)
texts = list(set([FileUtil.getMP3TextFile(text) for text in self.getConfirmedTexts(texts)]))
verseList = self.extractAllVerses(references)
if verseList:
allPlayList = []
allTextList = []
for verse in verseList:
for text in texts:
everySingleVerseList = Bible(text).getEverySingleVerseList((verse,))
playlist = []
textList = []
for b, c, v in everySingleVerseList:
folder = os.path.join(config.audioFolder, "bibles", text, "default", "{0}_{1}".format(b, c))
audioFile = os.path.join(folder, "{0}_{1}_{2}_{3}.mp3".format(text, b, c, v))
if os.path.isfile(audioFile):
playlist.append(audioFile)
if config.runMode == "terminal" and displayText:
try:
*_, verseText = Bible(text).readTextVerse(b, c, v)
verseText = TextUtil.htmlToPlainText(f"[<ref>{self.bcvToVerseReference(b, c, v)}</ref> ]{verseText}").strip()
verseText = verseText.replace("audiotrack ", "")
textList.append(verseText)
except:
textList.append("")
allPlayList += playlist
allTextList += textList
if config.enableHttpServer:
target = "study"
allPlayList = [(os.path.basename(fullpath), fullpath) for fullpath in allPlayList]
content = HtmlGeneratorUtil().getAudioPlayer(allPlayList)
else:
target = ""
content = ""
if config.runMode == "terminal":
# Create a new thread for the streaming task
config.playback_finished = False
playback_event = threading.Event()
if displayText:
self.playback_thread = threading.Thread(target=self.parent.playAudioBibleFilePlayListPlusDisplayText, args=(allPlayList, allTextList, False, playback_event))
else:
self.playback_thread = threading.Thread(target=self.parent.playAudioBibleFilePlayList, args=(allPlayList,))
# Start the streaming thread
self.playback_thread.start()
                # wait while the output is streaming; capture key combo 'ctrl+q' or 'ctrl+z' to stop the streaming
self.keyToStopStreaming(playback_event)
# when streaming is done or when user press "ctrl+q"
self.playback_thread.join()
# old way
#self.parent.playAudioBibleFilePlayListPlusDisplayText(allPlayList, allTextList) if displayText else self.parent.playAudioBibleFilePlayList(allPlayList)
else:
self.parent.playAudioBibleFilePlayList(allPlayList)
return (target, content, {})
else:
return self.invalidCommand()
# READCHAPTER:::
def readChapter(self, command, source):
#try:
content = ""
target = "study" if config.enableHttpServer else ""
items = command.split(".")
if len(items) == 3:
text, b, c = items
if config.enableHttpServer:
playlist = self.parent.playAudioBibleChapterVerseByVerse(text, b, c)
content = HtmlGeneratorUtil().getAudioPlayer(playlist)
else:
self.parent.playAudioBibleChapterVerseByVerse(text, b, c)
elif len(items) == 4:
text, b, c, startVerse = items
if config.enableHttpServer:
playlist = self.parent.playAudioBibleChapterVerseByVerse(text, b, c, int(startVerse))
content = HtmlGeneratorUtil().getAudioPlayer(playlist)
else:
self.parent.playAudioBibleChapterVerseByVerse(text, b, c, int(startVerse))
return (target, content, {})
#except:
# return self.invalidCommand()
# READVERSE:::
def readVerse(self, command, source):
self.parent.closeMediaPlayer()
text, b, c, v = command.split(".")
folder = os.path.join(config.audioFolder, "bibles", text, "default", "{0}_{1}".format(b, c))
filename = "{0}_{1}_{2}_{3}.mp3".format(text, b, c, v)
audioFile = os.path.join(folder, filename)
if os.path.isfile(audioFile):
if config.enableHttpServer:
playlist = [(filename, audioFile)]
content = HtmlGeneratorUtil().getAudioPlayer(playlist)
return ("study", content, {})
else:
try:
if config.mainWindow.audioPlayer is not None:
config.mainWindow.addToAudioPlayList(audioFile, True)
elif config.isVlcAvailable:
VlcUtil.playMediaFile(audioFile, config.vlcSpeed, (not config.hideVlcInterfaceReadingSingleVerse))
else:
self.parent.displayMessage(config.thisTranslation["noMediaPlayer"])
return ("", "", {})
except:
return self.invalidCommand()
else:
return self.noAudio()
# READWORD:::
def readWord(self, command, source):
if not source == 'http':
self.parent.closeMediaPlayer()
text, b, c, v, wordID = command.split(".")
folder = os.path.join(config.audioFolder, "bibles", text, "default", "{0}_{1}".format(b, c))
filename = "{0}_{1}_{2}_{3}_{4}.mp3".format(text, b, c, v, wordID)
audioFile = os.path.join(folder, filename)
if os.path.isfile(audioFile):
if config.enableHttpServer:
playlist = [(filename, audioFile)]
content = HtmlGeneratorUtil().getAudioPlayer(playlist)
return ("study", content, {})
else:
try:
if config.mainWindow.audioPlayer is not None:
config.mainWindow.addToAudioPlayList(audioFile, True)
elif config.isVlcAvailable:
VlcUtil.playMediaFile(audioFile, config.vlcSpeed, False)
else:
self.parent.displayMessage(config.thisTranslation["noMediaPlayer"])
return ("", "", {})
except:
return self.invalidCommand()
else:
if text == "BHS5":
return self.noHebrewAudio()
if text == "OGNT":
return self.noGreekAudio()
else:
return self.noAudio()
# READLEXEME:::
def readLexeme(self, command, source):
self.parent.closeMediaPlayer()
text, b, c, v, wordID = command.split(".")
folder = os.path.join(config.audioFolder, "bibles", text, "default", "{0}_{1}".format(b, c))
filename = "lex_{0}_{1}_{2}_{3}_{4}.mp3".format(text, b, c, v, wordID)
audioFile = os.path.join(folder, filename)
if os.path.isfile(audioFile):
if config.enableHttpServer:
playlist = [(filename, audioFile)]
content = HtmlGeneratorUtil().getAudioPlayer(playlist)
return ("study", content, {})
else:
try:
if config.mainWindow.audioPlayer is not None:
config.mainWindow.addToAudioPlayList(audioFile, True)
elif config.isVlcAvailable:
VlcUtil.playMediaFile(audioFile, config.vlcSpeed, False)
else:
self.parent.displayMessage(config.thisTranslation["noMediaPlayer"])
return ("", "", {})
except:
return self.invalidCommand()
else:
if text == "BHS5":
return self.noHebrewAudio()
if text == "OGNT":
return self.noGreekAudio()
else:
return self.noAudio()
# MEDIA:::
def openMediaPlayer(self, command, source, gui=True):
command = command.strip()
if not os.path.isfile(command):
return self.invalidCommand()
self.parent.closeMediaPlayer()
try:
if config.mainWindow.audioPlayer is not None:
config.mainWindow.addToAudioPlayList(command, True)
elif config.isVlcAvailable:
VlcUtil.playMediaFile(command, config.vlcSpeed, gui)
else:
self.parent.displayMessage(config.thisTranslation["noMediaPlayer"])
except:
WebtopUtil.openFile(command)
return ("", "", {})
# READBIBLE:::
def readBible(self, command, source):
text = config.mainText
book = config.mainB
chapter = config.mainC
folder = config.defaultMP3BibleFolder
playlist = []
if command:
count = command.count(":::")
if count == 0:
if command.startswith("@"):
folder = command[1:]
playlist.append((text, book, chapter, None, folder))
else:
playlist = self.getBiblePlaylist(command, text, folder)
elif count == 1:
text, reference = self.splitCommand(command)
playlist = self.getBiblePlaylist(reference, text, folder)
elif count == 2:
text, commandList = self.splitCommand(command)
reference, folder = self.splitCommand(commandList)
playlist = self.getBiblePlaylist(reference, text, folder)
else:
playlist.append((text, book, chapter, None, folder))
self.parent.playBibleMP3Playlist(playlist)
return ("", "", {})
def getBiblePlaylist(self, command, text, folder):
playlist = []
if "," in command:
parts = command.split(",")
for part in parts:
verseList = self.extractAllVerses(part)
book, chapter, verse = verseList[0]
playlist.append((text, book, chapter, None, folder))
elif "-" in command:
start, end = command.split("-")
verseList = self.extractAllVerses(start)
book, chapter, verse = verseList[0]
if ":" in start:
for index in range(int(verse), int(end)+1):
playlist.append((text, book, chapter, index, folder))
else:
for index in range(int(chapter), int(end)+1):
playlist.append((text, book, index, None, folder))
else:
verseList = self.extractAllVerses(command)
book, chapter, verse = verseList[0]
if ":" not in command:
verse = None
playlist.append((text, book, chapter, verse, folder))
return playlist
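    # Examples of how the reference argument appears to be interpreted (a hedged reading of the code above;
    # the concrete references are hypothetical):
    #   "John 3:16, John 3:18"  -> one (text, book, chapter, None, folder) entry per comma-separated part
    #   "John 3:16-18"          -> one entry per verse in the range 16..18 (":" present, so the range is verses)
    #   "John 3-5"              -> one entry per chapter in the range 3..5 (no ":", so the range is chapters)
    #   "John 3"                -> a single chapter entry with the verse set to None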
# functions about bible
# outline:::
def textBookOutline(self, command, source):
verseList = self.extractAllVerses(command)
if not verseList:
return self.invalidCommand()
else:
b, *_ = verseList[0]
bookName = BibleBooks.abbrev["eng"][str(b)][-1]
content = f"<h2>{bookName}</h2>"
for c in Bible(config.mainText).getChapterList(b):
#subheadings = BiblesSqlite().getChapterSubheadings(b, c)
subheadings = AGBTSData().getChapterFormattedSubheadings(b, c)
content += f"<p>{subheadings}</p>"
return ("study", content, {})
# overview:::
def textChapterOverview(self, command, source):
verseList = self.extractAllVerses(command)
if not verseList:
return self.invalidCommand()
else:
content = ""
for b, c, *_ in verseList:
chapterReference = self.bcvToVerseReference(b, c, 1)[:-2]
#subheadings = BiblesSqlite().getChapterSubheadings(b, c)
subheadings = AGBTSData().getChapterFormattedSubheadings(b, c)
if subheadings:
subheadings = "<p>{0}</p>".format(subheadings)
parallels = CollectionsSqlite().getChapterParallels(b, c)
if parallels:
parallels = "<hr><p><bb>Harmonies and Parallels</bb></p><p>{0}</p>".format(parallels)
promises = CollectionsSqlite().getChapterPromises(b, c)
if promises:
promises = "<hr><p><bb>Bible Promises</bb></p><p>{0}</p>".format(promises)
content += "<p><bb>{0}</bb></p>{1}{2}{3}<hr>".format(chapterReference, subheadings, parallels, promises)
return ("study", content, {})
# summary:::
def textChapterSummary(self, command, source):
verseList = self.extractAllVerses(command)
if not verseList:
return self.invalidCommand()
else:
content = ""
for b, c, *_ in verseList:
chapterSummary = Commentary("Brooks").getContent((b, c, 1))
if chapterSummary:
chapterSummary = "<p><bb>Complete Summary of the Bible (Brooks)</bb></p><p>{0}</p><hr>".format(chapterSummary)
content += chapterSummary
return ("study", content, {})
# CONCORDANCE:::
def textConcordance(self, command, source):
if command.count(":::") == 0:
command = "{0}:::{1}".format("OHGBi", command)
texts, strongNo = self.splitCommand(command)
if texts == "ALL":
texts = self.parent.strongBibles
else:
texts = self.getConfirmedTexts(texts)
texts = ["OHGBi" if text in ("MOB", "MIB", "MTB", "MAB", "MPB", "OHGB") else text for text in texts]
texts = [text for text in texts if text in self.parent.strongBibles]
texts = list(set(texts))
if not texts or not re.match("^[EGH][0-9]+?$", strongNo):
return self.invalidCommand()
else:
config.concordance = texts[-1]
config.concordanceEntry = strongNo
html = "<hr>".join([Bible(text).formatStrongConcordance(strongNo) for text in texts])
return ("study", html, {})
def cancelBibleParallels(self):
if config.runMode == "terminal":
config.terminalBibleParallels = False
config.terminalBibleComparison = False
# DISPLAYWORDFREQUENCY:::KJVx:::Matt 1
# DISPLAYWORDFREQUENCY:::KJVx:::Matt 1:::custom
def displayWordFrequency(self, command, source):
customFile = "custom"
if command.count(":::") == 2:
texts, references, customFile = command.split(":::")
else:
texts, references = command.split(":::")
config.readFormattedBibles = False
statisticsSqlite = StatisticsWordsSqlite()
data = self.textBible(f"{texts}:::{references}", source)
text = data[1]
matches = re.findall(r" ([GH][0-9]*?) ", text)
highlightMapping = statisticsSqlite.loadHighlightMappingFile(customFile)
for strongs in set(matches):
frequency = statisticsSqlite.getFrequency(strongs)
color = ""
for map in highlightMapping:
if frequency >= int(map[0]) and frequency <= int(map[1]):
if config.theme in ("dark", "night"):
color = map[3]
else:
color = map[2]
if color:
text = statisticsSqlite.addHighlightTagToPreviousWord(text, strongs, color, frequency)
return (data[0], text, data[2])
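    # Note (a reading of the loop above, not from the original comments): each row of the highlight mapping file
    # appears to be (minFrequency, maxFrequency, colourForLightThemes, colourForDarkThemes); a Strong's number whose
    # frequency falls inside a row's range is highlighted with that row's colour for the active theme.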
# BIBLE:::
def textBible(self, command, source):
if command.count(":::") == 0:
if config.openBibleInMainViewOnly:
updateViewConfig, viewText, *_ = self.getViewConfig("main")
else:
updateViewConfig, viewText, *_ = self.getViewConfig(source)
command = "{0}:::{1}".format(viewText, command)
texts, references = self.splitCommand(command)
texts = self.getConfirmedTexts(texts)
marvelBibles = self.getMarvelBibles()
if not texts:
return self.invalidCommand()
else:
self.cancelBibleParallels()
text = texts[0]
if text in marvelBibles:
fileItems = marvelBibles[text][0]
if os.path.isfile(os.path.join(*fileItems)):
return self.textBibleVerseParser(references, text, source)
else:
databaseInfo = marvelBibles[text]
self.parent.downloadHelper(databaseInfo)
return ("", "", {})
else:
return self.textBibleVerseParser(references, text, source)
# TEXT:::
def textText(self, command, source):
texts = self.getConfirmedTexts(command)
if not texts:
return self.invalidCommand()
else:
self.cancelBibleParallels()
marvelBibles = self.getMarvelBibles()
text = texts[0]
if text in marvelBibles:
fileItems = marvelBibles[text][0]
if os.path.isfile(os.path.join(*fileItems)):
if config.enforceCompareParallel:
self.parent.enforceCompareParallelButtonClicked()
updateViewConfig, viewText, viewReference, *_ = self.getViewConfig(source)
return self.textBibleVerseParser(viewReference, texts[0], source)
else:
databaseInfo = marvelBibles[text]
self.parent.downloadHelper(databaseInfo)
return ("", "", {})
else:
if config.enforceCompareParallel:
self.parent.enforceCompareParallelButtonClicked()
updateViewConfig, viewText, viewReference, *_ = self.getViewConfig(source)
return self.textBibleVerseParser(viewReference, texts[0], source)
# _chapters:::
def textChapters(self, command, source):
texts = self.getConfirmedTexts(command)
if not texts:
return self.invalidCommand()
else:
books = BibleBooks().booksMap.get(config.standardAbbreviation, BibleBooks.abbrev["eng"])
text = texts[0]
bible = Bible(text)
info = bible.bibleInfo()
bookList = bible.getBookList()
html = """<h2 style='text-align: center;'>{0} <button title='{1}' type='button' class='ubaButton' onclick='document.title="_menu:::"'><span class="material-icons-outlined">more_vert</span></button></h2>""".format(info, config.thisTranslation["menu_more"])
for bNo in bookList:
if bNo == config.mainB:
html += f'<span id="v{config.mainB}.{config.mainC}.{config.mainV}"></span>'
bkNoStr = str(bNo)
if bkNoStr in books:
abb = books[bkNoStr][0]
chapterList = bible.getChapterList(bNo)
commandPrefix = f"_verses:::{text}:::{abb} "
html += HtmlGeneratorUtil.getBibleChapterTable(books[bkNoStr][1], abb, chapterList, commandPrefix)
return (source, html, {})
# _verses:::
def textVerses(self, command, source):
if command.count(":::") == 0:
updateViewConfig, viewText, *_ = self.getViewConfig(source)
command = "{0}:::{1}".format(viewText, command)
texts, references = self.splitCommand(command)
texts = self.getConfirmedTexts(texts)
verseList = self.extractAllVerses(references)
if texts and verseList:
text = texts[0]
books = BibleBooks().booksMap.get(config.standardAbbreviation, BibleBooks.abbrev["eng"])
b, c, *_ = verseList[0]
abb = books[str(b)][0]
bible = Bible(text)
chapterVerseList = bible.getVerseList(b, c)
window = "STUDY" if source.lower() == "study" else "BIBLE"
commandPrefix = f"{window}:::{text}:::{abb} {c}:"
html = "<h2 style='text-align: center;'>{0}</h2>".format(text)
html += HtmlGeneratorUtil.getBibleVerseTable(books[str(b)][1], abb, c, chapterVerseList, commandPrefix)
return (source, html, {})
else:
return self.invalidCommand()
# _commentaries:::
def textCommentaries(self, command, source):
html = ""
for index, text in enumerate(self.parent.commentaryList):
fullName = self.parent.commentaryFullNameList[index]
html += """<p style='text-align: center;'><button title='{0}' type='button' class='ubaButton' onclick='document.title="_commentarychapters:::{0}"'>{1}</button></p>""".format(text, fullName)
return ("study", html, {})
# _commentarychapters:::
def textCommentaryChapters(self, command, source):
if not command in self.parent.commentaryList:
return self.invalidCommand()
else:
books = BibleBooks().booksMap.get(config.standardAbbreviation, BibleBooks.abbrev["eng"])
commentary = Commentary(command)
bookList = commentary.getBookList()
info = commentary.commentaryInfo()
if info == "https://Marvel.Bible Commentary" and command in Commentary.marvelCommentaries:
info = Commentary.marvelCommentaries[command]
#moreLink = """<p style='text-align: center;'>[ <ref onclick="window.parent.submitCommand('.library')">{0}</ref> ]</p>""".format(config.thisTranslation["change"]) if config.enableHttpServer else ""
html = """<h2 style='text-align: center;'>{0} <button title='{1}' type='button' class='ubaButton' onclick='document.title="_commentaries:::"'><span class="material-icons-outlined">more_vert</span></button></h2>""".format(info, config.thisTranslation["menu_more"])
for bNo in bookList:
bkNoStr = str(bNo)
if bkNoStr in books:
abb = books[bkNoStr][0]
chapterList = commentary.getChapterList(bNo)
commandPrefix = f"_commentaryverses:::{command}:::{abb} "
html += HtmlGeneratorUtil.getBibleChapterTable(books[bkNoStr][1], abb, chapterList, commandPrefix)
return ("study", html, {})
# _commentaryverses:::
def textCommentaryVerses(self, command, source):
if command.count(":::") == 0:
updateViewConfig, viewText, *_ = self.getViewConfig(source)
command = "{0}:::{1}".format(viewText, command)
text, references = self.splitCommand(command)
verseList = self.extractAllVerses(references)
if text in self.parent.commentaryList and verseList:
b, c, *_ = verseList[0]
if b > 0 and b <= 66:
books = BibleBooks().booksMap.get(config.standardAbbreviation, BibleBooks.abbrev["eng"])
abb = books[str(b)][0]
bible = Bible("KJV")
chapterVerseList = bible.getVerseList(b, c)
commandPrefix = f"COMMENTARY:::{text}:::{abb} {c}:"
html = "<h2 style='text-align: center;'>{0}</h2>".format(text)
html += HtmlGeneratorUtil.getBibleVerseTable(books[str(b)][1], abb, c, chapterVerseList, commandPrefix)
return ("study", html, {})
else:
return self.invalidCommand()
# MAIN:::
def textMain(self, command, source):
return self.textAnotherView(command, source, "main")
# STUDY:::
def textStudy(self, command, source):
if config.openBibleInMainViewOnly and not config.noQt:
self.parent.enableStudyBibleButtonClicked()
return self.textAnotherView(command, source, "study")
# STUDYTEXT:::
def textStudyText(self, command, source):
command = "{0}:::{1}".format(command, self.bcvToVerseReference(config.studyB, config.studyC, config.studyV))
return self.textStudy(command, "study")
# _copy:::
def copyText(self, command, source):
try:
if config.runMode == "terminal":
config.mainWindow.copy(command)
elif config.qtLibrary == "pyside6":
from PySide6.QtWidgets import QApplication
else:
from qtpy.QtWidgets import QApplication
QApplication.clipboard().setText(command)
self.parent.displayMessage(config.thisTranslation["copied"])
except:
return self.invalidCommand()
return ("", "", {})
# TRANSLATE:::
# Translate text using IBM Watson service
    # It works only if users have entered their own personal credentials and stored them in config.py locally on their computer.
    # The stored credentials are used only for communicating with the IBM Watson service via the python package 'ibm-watson'.
    # UBA does not collect any of this data.
def translateText(self, command, source):
translator = Translator()
# Use IBM Watson service to translate text
if translator.language_translator is not None:
# unpack command
if command.count(":::") == 0:
fromLanguage = translator.identify(command)
toLanguage = "en"
if not fromLanguage in Translator.fromLanguageCodes:
fromLanguage = "en"
if config.userLanguage in Translator.toLanguageCodes:
toLanguage = config.userLanguage
text = command
else:
language, text = self.splitCommand(command)
if "fr-CA" in language:
language = language.replace("fr-CA", "fr@CA")
if "zh-TW" in language:
language = language.replace("zh-TW", "zh@TW")
if language.count("-") != 1:
self.parent.displayMessage(config.thisTranslation["message_invalid"])
else:
fromLanguage, toLanguage = language.split("-")
if "@" in fromLanguage:
fromLanguage = fromLanguage.replace("@", "-")
if "@" in toLanguage:
toLanguage = toLanguage.replace("@", "-")
if not fromLanguage in Translator.fromLanguageCodes:
fromLanguage = "en"
if not toLanguage in Translator.toLanguageCodes:
toLanguage = "en"
# translate here
translation = translator.translate(text, fromLanguage, toLanguage)
self.parent.displayMessage(translation)
if config.autoCopyTranslateResult and not config.noQt:
if config.qtLibrary == "pyside6":
from PySide6.QtWidgets import QApplication
else:
from qtpy.QtWidgets import QApplication
QApplication.clipboard().setText(translation)
else:
self.parent.displayMessage(config.thisTranslation["ibmWatsonNotEnalbed"])
self.parent.openWebsite("https://github.com/eliranwong/UniqueBible/wiki/IBM-Watson-Language-Translator")
return ("", "", {})
    # The function below is an old way to process the TRANSLATE::: command with googletrans.
    # However, we found that googletrans no longer works with UBA.
    # We keep the following function for reference only.
def translateText_old(self, command, source):
languages = Languages().codes
# unpack command
if command.count(":::") == 0:
if config.userLanguage:
language = languages[config.userLanguage]
else:
language = "en"
text = command
else:
language, text = self.splitCommand(command)
# run google translate
if language in languages.values():
self.parent.mainView.translateTextIntoUserLanguage(text, language)
else:
self.parent.mainView.displayMessage(config.thisTranslation["message_invalid"])
return ("", "", {})
# called by MAIN::: & STUDY:::
def textAnotherView(self, command, source, target):
if command.count(":::") == 0:
updateViewConfig, viewText, *_ = self.getViewConfig(target)
command = "{0}:::{1}".format(viewText, command)
texts, references = self.splitCommand(command)
texts = self.getConfirmedTexts(texts)
if not texts:
return self.invalidCommand()
else:
self.cancelBibleParallels()
marvelBibles = self.getMarvelBibles()
text = texts[0]
if text in marvelBibles:
fileItems = marvelBibles[text][0]
if os.path.isfile(os.path.join(*fileItems)):
return self.textBibleVerseParser(references, texts[0], target)
else:
databaseInfo = marvelBibles[text]
self.parent.downloadHelper(databaseInfo)
return ("", "", {})
else:
return self.textBibleVerseParser(references, texts[0], target)
# distinctinterlinear:::
def distinctInterlinear(self, command, source):
translations = MorphologySqlite().distinctMorphology(command)
display = " | ".join(translations)
return ("study", display, {})
# distincttranslation:::
def distinctTranslation(self, command, source):
translations = MorphologySqlite().distinctMorphology(command, "Translation")
display = " | ".join(translations)
return ("study", display, {})
# COMPARE:::
def textCompare(self, command, source):
if command.count(":::") == 0:
confirmedTexts = ["ALL"]
verseList = self.extractAllVerses(command)
else:
texts, references = self.splitCommand(command)
confirmedTexts = self.getConfirmedTexts(texts)
verseList = self.extractAllVerses(references)
if not confirmedTexts or not verseList:
return self.invalidCommand()
else:
if config.runMode == "terminal" and not confirmedTexts == ["ALL"]:
config.compareParallelList = confirmedTexts
config.terminalBibleComparison = True
biblesSqlite = BiblesSqlite()
config.mainCssBibleFontStyle = ""
texts = confirmedTexts
if confirmedTexts == ["ALL"]:
plainBibleList, formattedBibleList = biblesSqlite.getTwoBibleLists()
texts = set(plainBibleList + formattedBibleList)
for text in texts:
(fontFile, fontSize, css) = Bible(text).getFontInfo()
config.mainCssBibleFontStyle += css
verses = biblesSqlite.compareVerse(verseList, confirmedTexts)
updateViewConfig, viewText, *_ = self.getViewConfig(source)
updateViewConfig(viewText, verseList[-1])
return ("study" if config.compareOnStudyWindow else "main", verses, {})
# SIDEBYSIDE:::
def textCompareSideBySide(self, command, source):
if command.count(":::") == 0:
if config.compareParallelList:
versions = "_".join(config.compareParallelList)
command = f"{versions}:::{command}"
else:
return ("", "", {})
texts, references = self.splitCommand(command)
confirmedTexts = self.getConfirmedTexts(texts)
verseList = self.extractAllVerses(references)
if not confirmedTexts or not verseList:
return self.invalidCommand()
else:
biblesSqlite = BiblesSqlite()
config.mainCssBibleFontStyle = ""
texts = confirmedTexts
for text in texts:
(fontFile, fontSize, css) = Bible(text).getFontInfo()
config.mainCssBibleFontStyle += css
verses = biblesSqlite.parallelVerse(verseList, confirmedTexts)
updateViewConfig, viewText, *_ = self.getViewConfig(source)
updateViewConfig(viewText, verseList[-1])
if config.runMode == "terminal":
verses = f"[BROWSER]{verses}"
return ("study" if config.compareOnStudyWindow else "main", verses, {})
# DIFF:::
# DIFFERENCE:::
def textDiff(self, command, source):
if command.count(":::") == 0:
confirmedTexts = ["ALL"]
verseList = self.extractAllVerses(command)
else:
texts, references = self.splitCommand(command)
confirmedTexts = self.getConfirmedTexts(texts)
verseList = self.extractAllVerses(references)
if not confirmedTexts or not verseList:
return self.invalidCommand()
else:
biblesSqlite = BiblesSqlite()
verses = biblesSqlite.diffVerse(verseList, confirmedTexts)
updateViewConfig, viewText, *_ = self.getViewConfig(source)
updateViewConfig(viewText, verseList[-1])
return (source, verses, {})
# PARALLEL:::
def textParallel(self, command, source):
updateViewConfig, viewText, *_ = self.getViewConfig(source)
if command.count(":::") == 0:
command = "{0}:::{1}".format(viewText, command)
texts, references = self.splitCommand(command)
confirmedTexts = self.getConfirmedTexts(texts)
if not confirmedTexts:
return self.invalidCommand()
else:
marvelBibles = self.getMarvelBibles()
missingMarvelTexts = [text for text in confirmedTexts if text in marvelBibles and not os.path.isfile(os.path.join(*marvelBibles[text][0]))]
if missingMarvelTexts:
databaseInfo = marvelBibles[missingMarvelTexts[0]]
self.parent.downloadHelper(databaseInfo)
return ("", "", {})
else:
if source in ('cli'):
tableList = ["{0} {1}".format(text, self.textBibleVerseParser(references, text, source, True)[1])
for text in confirmedTexts]
return("study" if config.compareOnStudyWindow else "main", "<br>".join(tableList), {})
else:
mainText = config.mainText
tableList = [("<th><ref onclick='document.title=\"TEXT:::{0}\"'>{0}</ref></th>".format(text),
"<td style='vertical-align: text-top;'><bibletext class={1}>{0}</bibletext></td>"
.format(self.textBibleVerseParser(references, text, source, True)[1], text))
for text in confirmedTexts]
versions, verses = zip(*tableList)
                    config.mainCssBibleFontStyle = ""
for text in confirmedTexts:
(fontFile, fontSize, css) = Bible(text).getFontInfo()
config.mainCssBibleFontStyle += css
config.mainText = mainText
self.parent.setBibleSelection()
return ("study" if config.compareOnStudyWindow else "main", "<table style='width:100%; table-layout:fixed;'><tr>{0}</tr><tr>{1}</tr></table>".format("".join(versions), "".join(verses)), {})
# PASSAGES:::
def textPassages(self, command, source):
updateViewConfig, viewText, *_ = self.getViewConfig(source)
if command.count(":::") == 0:
command = "{0}:::{1}".format(viewText, command)
texts, references = self.splitCommand(command)
confirmedTexts = self.getConfirmedTexts(texts)
if not confirmedTexts:
return self.invalidCommand()
else:
text = confirmedTexts[0]
marvelBibles = self.getMarvelBibles()
if text in marvelBibles and not os.path.isfile(os.path.join(*marvelBibles[text][0])):
databaseInfo = marvelBibles[text]
self.parent.downloadHelper(databaseInfo)
return ("", "", {})
else:
bibleVerseParser = BibleVerseParser(config.parserStandarisation)
biblesSqlite = BiblesSqlite()
passages = bibleVerseParser.extractAllReferences(references)
if passages:
tableList = [("<th><ref onclick='document.title=\"BIBLE:::{0}\"'>{0}</ref></th>".format(bibleVerseParser.bcvToVerseReference(*passage)), "<td style='vertical-align: text-top;'>{0}</td>".format(biblesSqlite.readMultipleVerses(text, [passage], displayRef=False))) for passage in passages]
versions, verses = zip(*tableList)
b, c, v, *_ = passages[-1]
updateViewConfig(text, (b, c, v))
return (source, "<table style='width:100%; table-layout:fixed;'><tr>{0}</tr><tr>{1}</tr></table>".format("".join(versions), "".join(verses)), {})
else:
return self.invalidCommand()
# _harmony:::
def textHarmony(self, command, source):
updateViewConfig, viewText, *_ = self.getViewConfig(source)
if command.count(":::") == 0:
command = "{0}:::{1}".format(viewText, command)
texts, references = self.splitCommand(command)
confirmedTexts = self.getConfirmedTexts(texts)
if not confirmedTexts:
return self.invalidCommand()
else:
text = confirmedTexts[0]
marvelBibles = self.getMarvelBibles()
if text in marvelBibles and not os.path.isfile(os.path.join(*marvelBibles[text][0])):
databaseInfo = marvelBibles[text]
self.parent.downloadHelper(databaseInfo)
return ("", "", {})
else:
bibleVerseParser = BibleVerseParser(config.parserStandarisation)
biblesSqlite = BiblesSqlite()
cs = CollectionsSqlite()
topic, passagesString = cs.readData("PARALLEL", references.split("."))
passages = bibleVerseParser.extractAllReferences(passagesString, tagged=True)
tableList = [("<th><ref onclick='document.title=\"BIBLE:::{0}\"'>{0}</ref></th>".format(bibleVerseParser.bcvToVerseReference(*passage)), "<td style='vertical-align: text-top;'>{0}</td>".format(biblesSqlite.readMultipleVerses(text, [passage], displayRef=False))) for passage in passages]
versions, verses = zip(*tableList)
window = "main" if config.openBibleInMainViewOnly else "study"
return (window,
"<h2>{2}</h2><table style='width:100%; table-layout:fixed;'><tr>{0}</tr><tr>{1}</tr></table>"
.format("".join(versions), "".join(verses), topic), {})
# _promise:::
def textPromise(self, command, source):
updateViewConfig, viewText, *_ = self.getViewConfig(source)
if command.count(":::") == 0:
command = "{0}:::{1}".format(viewText, command)
texts, references = self.splitCommand(command)
confirmedTexts = self.getConfirmedTexts(texts)
if not confirmedTexts:
return self.invalidCommand()
else:
text = confirmedTexts[0]
marvelBibles = self.getMarvelBibles()
if text in marvelBibles and not os.path.isfile(os.path.join(*marvelBibles[text][0])):
databaseInfo = marvelBibles[text]
self.parent.downloadHelper(databaseInfo)
return ("", "", {})
else:
bibleVerseParser = BibleVerseParser(config.parserStandarisation)
biblesSqlite = BiblesSqlite()
cs = CollectionsSqlite()
topic, passagesString = cs.readData("PROMISES", references.split("."))
passages = bibleVerseParser.extractAllReferences(passagesString, tagged=True)
return ("study", "<h2>{0}</h2>{1}".format(topic, biblesSqlite.readMultipleVerses(text, passages)), {})
# _biblenote:::
def textBiblenote(self, command, source):
text, references = self.splitCommand(command)
if text in self.getConfirmedTexts(text):
bible = Bible(text)
note = bible.readBiblenote(references)
return ("study", note, {})
else:
return self.invalidCommand()
# openbooknote:::
def openBookNoteRef(self, command, source):
if not " " in command:
command = "{0} 1".format(command)
verseList = self.extractAllVerses(command)
if verseList:
b, *_ = verseList[0]
return self.openBookNote(str(b), source)
else:
return self.invalidCommand()
# _openbooknote:::
def openBookNote(self, command, source):
try:
if command:
b, *_ = command.split(".")
b = int(b)
else:
b = config.mainB
if config.runMode == "terminal":
content = NoteSqlite().getBookNote(b)[0]
return ("", content, {})
else:
self.parent.openBookNote(b)
return ("", "", {})
except:
return self.invalidCommand()
# openchapternote:::
def openChapterNoteRef(self, command, source):
verseList = self.extractAllVerses(command)
if verseList:
b, c, *_ = verseList[0]
return self.openChapterNote("{0}.{1}".format(b, c), source)
else:
return self.invalidCommand()
# _openchapternote:::
def openChapterNote(self, command, source):
try:
if command:
b, c, *_ = command.split(".")
b, c = int(b), int(c)
else:
b, c = config.mainB, config.mainC
if config.runMode == "terminal":
content = NoteSqlite().getChapterNote(b, c)[0]
return ("", content, {})
else:
self.parent.openChapterNote(b, c)
return ("", "", {})
except:
return self.invalidCommand()
# openversenote:::
def openVerseNoteRef(self, command, source):
verseList = self.extractAllVerses(command)
if verseList:
b, c, v, *_ = verseList[0]
return self.openVerseNote("{0}.{1}.{2}".format(b, c, v), source)
else:
return self.invalidCommand()
# _openversenote:::
def openVerseNote(self, command, source):
try:
if command:
b, c, v, *_ = command.split(".")
b, c, v = int(b), int(c), int(v)
else:
b, c, v = config.mainB, config.mainC, config.mainV
if config.runMode == "terminal":
content = NoteSqlite().getVerseNote(b, c, v)[0]
return ("", content, {})
else:
self.parent.openVerseNote(b, c, v)
return ("", "", {})
except:
return self.invalidCommand()
# editbooknote:::
def editBookNoteRef(self, command, source):
if not " " in command:
command = "{0} 1".format(command)
verseList = self.extractAllVerses(command)
if verseList:
b, *_ = verseList[0]
return self.editBookNote(str(b), source)
else:
return self.invalidCommand()
# _editbooknote:::
def editBookNote(self, command, source):
try:
if command:
b, *_ = command.split(".")
c = 1
v = 1
else:
b, c, v = config.mainB, 1, 1
if config.runMode == "terminal":
config.mainWindow.openNoteEditor("book", b=b, c=c, v=v)
return ("", "[MESSAGE]Text Editor Closed", {})
elif self.parent.noteSaved or self.parent.warningNotSaved():
self.parent.openNoteEditor("book", b=b, c=c, v=v)
return ("", "", {})
except:
return self.invalidCommand()
# editchapternote:::
def editChapterNoteRef(self, command, source):
verseList = self.extractAllVerses(command)
if verseList:
b, c, *_ = verseList[0]
return self.editChapterNote("{0}.{1}".format(b, c), source)
else:
return self.invalidCommand()
# _editchapternote:::
def editChapterNote(self, command, source):
try:
if command:
b, c, *_ = command.split(".")
v = 1
else:
b, c, v = config.mainB, config.mainC, 1
if config.runMode == "terminal":
config.mainWindow.openNoteEditor("chapter", b=b, c=c, v=v)
return ("", "[MESSAGE]Text Editor Closed", {})
elif self.parent.noteSaved or self.parent.warningNotSaved():
self.parent.openNoteEditor("chapter", b=b, c=c, v=v)
return ("", "", {})
except:
return self.invalidCommand()
# editversenote:::
def editVerseNoteRef(self, command, source):
verseList = self.extractAllVerses(command)
if verseList:
b, c, v, *_ = verseList[0]
return self.editVerseNote("{0}.{1}.{2}".format(b, c, v), source)
else:
return self.invalidCommand()
# _editversenote:::
def editVerseNote(self, command, source):
try:
if command:
b, c, v, *_ = command.split(".")
else:
b, c, v = config.mainB, config.mainC, config.mainV
if config.runMode == "terminal":
config.mainWindow.openNoteEditor("verse", b=b, c=c, v=v)
return ("", "[MESSAGE]Text Editor Closed", {})
elif self.parent.noteSaved or self.parent.warningNotSaved():
self.parent.openNoteEditor("verse", b=b, c=c, v=v)
#else:
#self.parent.noteEditor.raise_()
return ("", "", {})
except:
return self.invalidCommand()
# openjournal:::
def openJournalNote(self, command, source):
try:
if command:
year, month, day, *_ = command.split("-")
year, month, day = int(year), int(month), int(day)
else:
today = date.today()
year, month, day = today.year, today.month, today.day
journalSqlite = JournalSqlite()
note = journalSqlite.getJournalNote(year, month, day)
return ("study", note, {})
except:
return self.invalidCommand()
# editjournal:::
def editJournalNote(self, command, source):
try:
if command:
year, month, day, *_ = command.split("-")
else:
today = date.today()
year, month, day = today.year, today.month, today.day
if config.runMode == "terminal":
config.mainWindow.openNoteEditor("journal", year=year, month=month, day=day)
return ("", "[MESSAGE]Text Editor Closed", {})
elif self.parent.noteSaved or self.parent.warningNotSaved():
self.parent.openNoteEditor("journal", year=year, month=month, day=day)
return ("", "", {})
except:
return self.invalidCommand()
# _open:::
def openMarvelDataFile(self, command, source):
fileitems = command.split("/")
filePath = os.path.join(config.marvelData, *fileitems)
if config.runMode == "terminal":
return self.osCommand(f"{config.open} {filePath}", source)
        elif config.enableHttpServer and re.search(r"\.jpg$|\.jpeg$|\.png$|\.bmp$|\.gif$", filePath.lower()):
fullPath = os.path.join(os.getcwd(), filePath)
if os.path.isfile(fullPath):
                # config.marvelData is a relative path
                # a relative path outside the htmlResources directory does not work with the http-server, though it works in the desktop version
#filePath = "../"+filePath
#return ("study", "<img src='{0}'>".format(filePath), {})
return ("study", TextUtil.imageToText(fullPath), {})
elif os.path.isfile(filePath):
# config.marvelData is an absolute path
return ("study", TextUtil.imageToText(filePath), {})
else:
return ("study", "Image not found!", {})
elif config.enableHttpServer:
return ("study", "[File type not supported!]", {})
elif re.search("\.bmp$|\.jpg$|\.jpeg$|\.png$|\.pbm$|\.pgm$|\.ppm$|\.xbm$|\.xpm$", filePath.lower()):
from gui.ImageViewer import ImageViewer
imageViewer = ImageViewer(self.parent)
imageViewer.show()
imageViewer.load_file(filePath)
return ("", "", {})
else:
self.parent.openExternalFile(filePath)
return ("", "", {})
# open:::
def openExternalFile(self, command, source):
fileitems = command.split("/")
filePath = os.path.join(*fileitems)
if config.runMode == "terminal":
return self.osCommand(f"{config.open} {filePath}", source)
elif config.enableHttpServer:
return ("study", TextUtil.imageToText(filePath), {})
elif re.search("\.bmp$|\.jpg$|\.jpeg$|\.png$|\.pbm$|\.pgm$|\.ppm$|\.xbm$|\.xpm$", filePath.lower()):
from gui.ImageViewer import ImageViewer
imageViewer = ImageViewer(self.parent)
imageViewer.show()
imageViewer.load_file(filePath)
return ("", "", {})
else:
self.parent.openExternalFile(filePath)
return ("", "", {})
# docx:::
def openDocxReader(self, command, source):
if command:
self.parent.openTextFile(os.path.join(config.marvelData, "docx", command))
return ("", "", {})
# opennote:::
def textOpenNoteFile(self, command, source):
if command:
self.parent.openTextFile(command)
return ("", "", {})
# _openfile:::
def textOpenFile(self, command, source):
fileName = config.history["external"][int(command)]
if fileName:
self.parent.openTextFile(fileName)
return ("", "", {})
# _editfile:::
def textEditFile(self, command, source):
if command:
self.parent.editExternalFileHistoryRecord(int(command))
return ("", "", {})
# _website:::
def textWebsite(self, command, source):
if command:
if config.enableHttpServer and command.startswith("http"):
subprocess.Popen("{0} {1}".format(config.open, command), shell=True)
elif config.runMode == "terminal" and config.terminalEnableTermuxAPI:
#os.system(f"{config.open} command")
return self.osCommand(f"{config.open} {command}", source)
else:
webbrowser.open(command)
return ("", "", {})
else:
return self.invalidCommand()
# _uba:::
def textUba(self, command, source):
if command:
pathItems = command[7:].split("/")
file = os.path.join(*pathItems)
config.history["external"].append(file)
self.parent.openExternalFileHistoryRecord(-1)
return ("", "", {})
else:
return self.invalidCommand()
# _info:::
def textInfo(self, command, source):
if config.instantInformationEnabled:
return ("instant", command, {})
else:
return ("", "", {})
# _lexicaldata:::
def instantLexicalData(self, command, source):
allInfo = []
for item in command.split("_"):
info = LexicalData.getLexicalData(item, True)
if info:
allInfo.append(info)
allInfo = "<hr>".join(allInfo)
return ("instant", allInfo, {})
# _instantverse:::
def instantVerse(self, command, source):
if config.instantInformationEnabled:
morphologySqlite = MorphologySqlite()
*_, commandList = self.splitCommand(command)
elements = commandList.split(".")
if len(elements) == 3:
b, c, v = [int(i) for i in elements]
info = morphologySqlite.instantVerse(b, c, v)
return ("instant", info, {})
elif len(elements) == 4:
b, c, v, wordID = elements
info = morphologySqlite.instantVerse(int(b), int(c), int(v), wordID)
return ("instant", info, {})
else:
return self.invalidCommand()
else:
return ("", "", {})
# _imvr:::
def instantMainVerseReference(self, command, source):
text = config.mainText
if ":::" in command:
text, verseList = self.splitCommand(command)
verseList = self.extractAllVerses(command)
if verseList:
return self.instantMainVerse(".".join([str(i) for i in verseList[0]]), source, text)
else:
return ("", "", {})
# _imv:::
def instantMainVerse(self, command, source, text=""):
if not text or not text in self.parent.textList:
text = config.mainText
if config.instantInformationEnabled and command:
info = self.getInstantMainVerseInfo(command, text)
return ("instant", info, {})
else:
return ("", "", {})
def getInstantMainVerseInfo(self, command, text):
bcvList = [int(i) for i in command.split(".")]
info = BiblesSqlite().readMultipleVerses(text, [bcvList])
if text in config.rtlTexts and bcvList[0] < 40:
info = "<div style='direction: rtl;'>{0}</div>".format(info)
return info
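    # Note: book numbers below 40 correspond to the Old Testament, so right-to-left styling is applied
    # only when the chosen text is listed in config.rtlTexts (e.g. Hebrew bibles).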
# _instantword:::
def instantWord(self, command, _):
if config.instantInformationEnabled:
info = self.getInstantWordInfo(command)
return ("instant", info, {})
else:
return ("", "", {})
def getInstantWordInfo(self, command):
commandList = self.splitCommand(command)
morphologySqlite = MorphologySqlite()
wordID = commandList[1]
wordID = re.sub('^[h0]+?([^h0])', r'\1', wordID, flags=re.M)
return morphologySqlite.instantWord(int(commandList[0]), int(wordID))
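    # The re.sub above strips any leading "h"/"0" padding from the word ID before converting it to int,
    # e.g. (illustration only) "h0123" -> "123" and "0040" -> "40".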
# _bibleinfo:::
def textBibleInfo(self, command, source):
if self.getConfirmedTexts(command):
biblesSqlite = BiblesSqlite()
info = biblesSqlite.bibleInfo(command)
if info:
return ("instant", info, {})
else:
return ("", "", {})
else:
return self.invalidCommand()
# _commentaryinfo:::
def textCommentaryInfo(self, command, source):
commentaryFile = os.path.join(config.commentariesFolder, "c{0}.commentary".format(command))
if os.path.isfile(commentaryFile):
if command in Commentary.marvelCommentaries:
return ("instant", Commentary.marvelCommentaries[command], {})
else:
commentarySqlite = Commentary(command)
info = commentarySqlite.commentaryInfo()
if info:
return ("instant", info, {})
else:
return ("", "", {})
else:
return self.invalidCommand()
# mapping verse action
def mapVerseAction(self, keyword, verseReference, source):
if self.isDatabaseInstalled(keyword.lower()):
self.lastKeyword = keyword.lower()
actionMap = {
"COMPARE": self.textCompare,
"CROSSREFERENCE": self.textCrossReference,
"TSKE": self.tske,
"TRANSLATION": self.textTranslation,
"DISCOURSE": self.textDiscourse,
"WORDS": self.textWords,
"COMBO": self.textCombo,
"INDEX": self.textIndex,
"COMMENTARY": self.textCommentary,
"STUDY": self.textStudy,
"_noAction": self.noAction,
}
return actionMap[keyword](verseReference, source)
else:
return self.databaseNotInstalled(keyword.lower())
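    # Usage sketch (hypothetical keyword and reference): mapVerseAction("TSKE", "John 3:16", source) dispatches to
    # self.tske with that verse reference, provided the corresponding database is installed.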
# _menu:::
def textMenu(self, command, source):
try:
dotCount = command.count(".")
if dotCount == 3 and config.enableHttpServer:
text, b, c, v = command.split(".")
config.mainText, config.mainB, config.mainC, config.mainV = text, int(b), int(c), int(v)
bibleCommand = "BIBLE:::{0}:::{1} {2}:{3}".format(text, BibleBooks.abbrev["eng"][b][0], config.mainC, config.mainV)
self.parent.addHistoryRecord("main", bibleCommand)
menu = HtmlGeneratorUtil().getMenu(command, source)
return (source, menu, {})
except:
return self.invalidCommand()
# _comparison:::
def textComparisonMenu(self, command, source):
try:
menu = HtmlGeneratorUtil().getComparisonMenu(command, source)
return (source, menu, {})
except:
return self.invalidCommand()
# _vndc:::
def verseNoDoubleClick(self, command, source):
if not command:
command = f"{config.mainText}.{config.mainB}.{config.mainC}.{config.mainV}"
dotCount = command.count(".")
if dotCount == 3 and config.enableHttpServer:
text, b, c, v = command.split(".")
config.mainText, config.mainB, config.mainC, config.mainV = text, int(b), int(c), int(v)
bibleCommand = "BIBLE:::{0}:::{1} {2}:{3}".format(text, BibleBooks.abbrev["eng"][b][0], config.mainC, config.mainV)
self.parent.addHistoryRecord("main", bibleCommand)
if dotCount != 3 or config.verseNoDoubleClickAction == "_menu" or (config.enableHttpServer and config.verseNoDoubleClickAction.startswith("_cp")):
if dotCount == 2 and not config.preferHtmlMenu and not config.enableHttpServer:
text, b, c = command.split(".")
self.parent.openControlPanelTab(0, int(b), int(c), int(1), text),
return ("", "", {})
else:
menu = HtmlGeneratorUtil().getMenu(command, source)
return (source, menu, {})
elif config.verseNoDoubleClickAction in ("none", "_noAction"):
return self.noAction(command, source)
elif config.verseNoDoubleClickAction.startswith("_cp"):
index = int(config.verseNoDoubleClickAction[-1])
text, b, c, v = command.split(".")
self.parent.openControlPanelTab(index, int(b), int(c), int(v), text),
return ("", "", {})
else:
compareOnMain = (config.verseNoSingleClickAction == "COMPARE" and not config.compareOnStudyWindow)
*_, b, c, v = command.split(".")
verseReference = "{0} {1}:{2}".format(BibleBooks().abbrev["eng"][b][0], c, v)
self.parent.addHistoryRecord("main" if compareOnMain else "study", "{0}:::{1}".format(config.verseNoDoubleClickAction, verseReference))
return self.mapVerseAction(config.verseNoDoubleClickAction, verseReference, source)
# _vnsc:::
def verseNoSingleClick(self, command, source):
if not command:
command = f"{config.mainText}.{config.mainB}.{config.mainC}.{config.mainV}"
if command.count(".") != 4:
return self.invalidCommand()
compareOnMain = (config.verseNoSingleClickAction == "COMPARE" and not config.compareOnStudyWindow)
text, b, c, v, verseReference = command.split(".")
bibleCommand = "BIBLE:::{0}:::{1}".format(text, verseReference)
if config.enableHttpServer:
config.mainText, config.mainB, config.mainC, config.mainV = text, int(b), int(c), int(v)
self.parent.addHistoryRecord("main", bibleCommand)
elif not compareOnMain:
self.parent.passRunTextCommand(bibleCommand, True, source)
if not config.verseNoSingleClickAction.upper() == config.syncAction.upper():
if config.verseNoSingleClickAction == "_menu" or (config.enableHttpServer and config.verseNoSingleClickAction.startswith("_cp")):
menu = HtmlGeneratorUtil().getMenu("{0}.{1}.{2}.{3}".format(text, b, c, v), source)
return (source, menu, {})
elif config.verseNoSingleClickAction.startswith("_cp"):
index = int(config.verseNoSingleClickAction[-1])
self.parent.openControlPanelTab(index, int(b), int(c), int(v), text),
return ("", "", {})
else:
if not compareOnMain and config.syncAction == "STUDY":
self.parent.nextStudyWindowTab()
self.parent.addHistoryRecord("main" if compareOnMain else "study", "{0}:::{1}".format(config.verseNoSingleClickAction, verseReference))
return self.mapVerseAction(config.verseNoSingleClickAction, verseReference, source)
return ("", "", {})
# _cp:::
# _mastercontrol:::
def openMasterControl(self, command, source):
try:
if command and int(command) < 5:
index = int(command)
else:
index = 0
self.parent.openControlPanelTab(index, config.mainB, config.mainC, config.mainV, config.mainText),
return ("", "", {})
except:
return self.invalidCommand()
# _commentary:::
def textCommentaryMenu(self, command, source):
if config.enableHttpServer:
config.commentaryB, config.commentaryC, config.commentaryV = config.mainB, config.mainC, config.mainV
text, *_ = command.split(".")
commentary = Commentary(text)
commentaryMenu = commentary.getMenu(command)
return ("study", commentaryMenu, {})
# _book:::
def textBookMenu(self, command, source):
bookData = BookData()
bookMenu = bookData.getMenu(command)
config.bookChapNum = 0
self.parent.updateBookButton()
return ("study", bookMenu, {'tab_title':command[:20]})
# _history:::
def textHistory(self, command, source):
if command in ("main", "study"):
return (command, self.parent.getHistory(command), {})
else:
return self.invalidCommand()
# _historyrecord:::
def textHistoryRecord(self, command, source):
if source == "http":
source = "main"
if source in ("main", "study"):
recordNumber = int(command)
config.currentRecord[source] = recordNumber
textCommand = config.history[source][recordNumber]
return self.parser(textCommand, source)
else:
return self.invalidCommand()
# _command:::
def textCommand(self, command, source):
return ("command", command, {})
# _paste:::
def pasteFromClipboard(self, command, source):
if config.runMode == "terminal":
config.mainWindow.getclipboardtext()
elif ("Pyperclip" in config.enabled) and config.runMode == "terminal":
import pyperclip
content = pyperclip.paste()
return ("study", content, {})
else:
self.parent.pasteFromClipboard()
return ("", "", {})
# _whatis:::
def textWhatIs(self, command, source):
try:
command = command.lower().strip()
if config.runMode == "terminal" and command in config.mainWindow.dotCommands:
return ("study", config.mainWindow.dotCommands[command][0], {})
elif ":::"in command:
command, *_ = command.split(":::", 1)
content = self.interpreters[command][-1]
content = re.sub(" #", "<br>#", content)
return ("study", content, {})
except:
return self.invalidCommand()
# _htmlimage:::
def textHtmlImage(self, command, source):
if config.runMode == "terminal":
filepath = os.path.join("htmlResources", "images", command)
if config.terminalEnableTermuxAPI:
os.system(f"termux-share {filepath}")
else:
os.system(f"{config.open} {filepath}")
return ("", "", {})
else:
content = "<p align='center'><img src='images/{0}'><br><br><ref onclick='openHtmlFile({1}images/{0}{1})'>{0}</ref></p>".format(command, '"')
return ("popover.{0}".format(source), content, {})
# _image:::
def textImage(self, command, source):
module, entry = self.splitCommand(command)
imageSqlite = ImageSqlite()
imageSqlite.exportImage(module, entry)
if module == "EXLBL":
imageFile = "htmlResources/images/exlbl/EXLBL_{0}".format(entry)
else:
imageFile = "htmlResources/images/{0}/{0}_{1}".format(module, entry)
self.openExternalFile(imageFile, source)
return ("", "", {})
#content = "<img src='images/{0}/{0}_{1}'>".format(module, entry)
#return ("popover.{0}".format(source), content)
# COMMENTARY:::
def textCommentary(self, command, source):
try:
if command.count(":::") == 0:
command = "{0}:::{1}".format(config.commentaryText, command)
elif command.count(":::") == 1 and command.endswith(":::"):
command = "{0}{1}".format(command, self.bcvToVerseReference(config.mainB, config.mainC, config.mainV))
commandList = self.splitCommand(command)
if " " in commandList[1]:
verseList = self.extractAllVerses(commandList[1])
else:
verseList = [(BibleBooks.name2number[commandList[1]], 0, 0)]
if not len(commandList) == 2 or not verseList:
return self.invalidCommand()
else:
bcvTuple = verseList[0]
if config.enableHttpServer:
config.mainB, config.mainC, config.mainV, *_ = bcvTuple
module = commandList[0]
commentary = Commentary(module)
content = commentary.getContent(bcvTuple)
if not content == "INVALID_COMMAND_ENTERED":
self.setCommentaryVerse(module, bcvTuple)
return ("study", content, {'tab_title':'Com:' + module})
except:
return self.invalidCommand()
# COMMENTARY2:::
def textCommentary2(self, command, source):
if not command:
command = f"{config.commentaryB}.{config.commentaryC}.{config.commentaryV}"
if command.count(":::") == 0:
command = "{0}:::{1}".format(config.commentaryText, command)
commandList = self.splitCommand(command)
reference = commandList[1]
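        # the reference is expected in book.chapter.verse form, e.g. "43.3.16" for John 3:16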
if re.search(r"^[0-9]+?\.[0-9]+?\.[0-9]+?$", reference):
verseList = [tuple([int(i) for i in reference.split(".")])]
if not len(commandList) == 2 or not verseList:
return self.invalidCommand()
else:
bcvTuple = verseList[0]
module = commandList[0]
commentary = Commentary(module)
content = commentary.getContent(bcvTuple)
if not content == "INVALID_COMMAND_ENTERED":
self.setCommentaryVerse(module, bcvTuple)
return ("study", content, {})
else:
return self.invalidCommand()
# SEARCHTOOL:::
def textSearchTool(self, command, source):
try:
origModule, entry = self.splitCommand(command)
if origModule == config.thisTranslation['searchAllDictionaries']:
modules = self.parent.dictionaryListAbb
else:
modules = [origModule]
TextCommandParser.last_text_search = entry
indexes = IndexesSqlite()
content = ""
toolList = [("", "[search other resources]"), ("EXLBP", "Exhaustive Library of Bible Characters"), ("EXLBL", "Exhaustive Library of Bible Locations")] + indexes.topicList + indexes.dictionaryList + indexes.encyclopediaList
for module in modules:
if module in dict(toolList[1:]).keys() or module in ("mRMAC", "mETCBC", "mLXX"):
action = "searchItem(this.value, \"{0}\")".format(entry)
selectList = indexes.formatSelectList(action, toolList)
if module in dict(indexes.topicList).keys():
config.topic = module
elif module in dict(indexes.dictionaryList).keys() and not module == "HBN":
config.dictionary = module
elif module in dict(indexes.encyclopediaList).keys():
config.encyclopedia = module
searchSqlite = SearchSqlite()
exactMatch = searchSqlite.getContent(module, entry)
similarMatch = searchSqlite.getSimilarContent(module, entry)
selectList = f"<p>{selectList}</p><p>" if not config.runMode == "terminal" else ""
content += "<h2>Search <span style='color: brown;'>{0}</span> for <span style='color: brown;'>{1}</span></h2>{4}<b>Exact match:</b><br><br>{2}</p><p><b>Partial match:</b><br><br>{3}".format(module, entry, exactMatch, similarMatch, selectList)
if len(content) > 0:
return ("study", f"[MESSAGE]{content}" if config.runMode == "terminal" else content, {'tab_title': 'Search:' + origModule + ':' + entry})
else:
return self.invalidCommand()
except:
return self.invalidCommand()
# GPTSEARCH:::
def textGPTSEARCHSearch(self, command, source):
import openai, traceback
try:
openai.api_key = os.environ["OPENAI_API_KEY"] = config.openaiApiKey
openai.organization = config.openaiApiOrganization
if command.count(":::") == 0:
texts = ""
query = command
else:
commandList = self.splitCommand(command)
texts, query = commandList
prompt = f"""Formulate a sql query over a table created with statement "CREATE TABLE Verses (Book INT, Chapter INT, Verse INT, Scripture TEXT)".
The book numbers range from 1 to 66, corresponding to the canonical order from Genesis to Revelation in the Bible.
I am providing you below with WHERE condition described in natural language.
Give me only the sql query statement, starting with "SELECT * FROM Verses WHERE " without any extra explanation or comment.
The WHERE condition is described as: {query}"""
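            # e.g. a condition like "verses mentioning both faith and works" is expected to yield
            # something such as: SELECT * FROM Verses WHERE Scripture LIKE '%faith%' AND Scripture LIKE '%works%'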
# run ChatGPT to get a standard sql query
messages = [
{"role": "system", "content" : "You’re a kind helpful assistant"},
{"role": "user", "content" : prompt}
]
completion = openai.ChatCompletion.create(
model=config.chatGPTApiModel,
messages=messages,
n=1,
temperature=0.0,
max_tokens=2048,
)
sqlQuery = completion.choices[0].message.content
# check
#print(sqlQuery)
sqlQuery = re.sub("^SELECT . FROM Verses WHERE ", "", sqlQuery)
command = f"{texts}:::{sqlQuery}" if texts else sqlQuery
return self.textSearch(command, source, "ADVANCED", config.addFavouriteToMultiRef)
except:
response = "GPT search feature requires an OpenAI API Key; read https://github.com/eliranwong/UniqueBible/wiki/Search-Bible-with-Natural-Language-via-ChatGPT ; " + traceback.format_exc()
return ("study", response, {})
# SEMANTIC:::
def textSemanticSearch(self, command, source):
# upgrade package llama_index
if not self.llamaIndexUpdated:
try:
os.system("pip3 install --upgrade llama_index")
except:
pass
self.llamaIndexUpdated = True
# import packages
import openai, traceback, shutil
from llama_index.llms import OpenAI
from llama_index import SimpleDirectoryReader, ServiceContext, GPTVectorStoreIndex, StorageContext, load_index_from_storage
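        # NOTE: these imports assume the pre-0.10 llama_index package layout
        # (top-level ServiceContext / GPTVectorStoreIndex); newer releases moved them under llama_index.core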
#from pathlib import Path
try:
openai.api_key = os.environ["OPENAI_API_KEY"] = config.openaiApiKey
openai.organization = config.openaiApiOrganization
if command.count(":::") == 0:
command = "{0}:::{1}".format(config.mainText, command)
commandList = self.splitCommand(command)
text, query = commandList
if not text in BiblesSqlite().getBibleList():
return self.invalidCommand()
persist_dir = os.path.join("llama_index", f"{text}_md_index")
bible_dir = os.path.join("temp", text)
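            # e.g. for the text "KJV", indexes persist under llama_index/KJV_md_index,
            # while temp/KJV holds the temporary markdown export used to build them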
def removeTempDir():
if os.path.isdir(bible_dir):
shutil.rmtree(bible_dir)
# build index if it does not exist
if not os.path.isdir(persist_dir):
# notify users
message = "Create indexes now ..."
print(message) if config.noQt else self.parent.displayMessage(message)
# load book information
#bibleBooks = BibleBooks()
# export bible text in markdown format
Bible(text).exportToMarkdown(standardReference=True)
# create index
# define LLM
llm = OpenAI(temperature=config.chatGPTApiTemperature, model=config.chatGPTApiModel, max_tokens=config.chatGPTApiMaxTokens)
service_context = ServiceContext.from_defaults(llm=llm)
documents = SimpleDirectoryReader(bible_dir, recursive=True, required_exts=[".md"]).load_data()
index = GPTVectorStoreIndex.from_documents(documents, service_context=service_context)
index.storage_context.persist(persist_dir=persist_dir)
# remove exported bible text after indexes are created
removeTempDir()
# load index
storage_context = StorageContext.from_defaults(persist_dir=persist_dir)
index = load_index_from_storage(storage_context)
# run query
query_engine = index.as_query_engine()
response = query_engine.query(query).response
# parse bible reference
if not config.runMode in ("terminal", "telnet-server"):
response = self.parent.htmlWrapper(response, parsing=True, view="study", linebreak=True, html=False)
except:
response = "Semantic search feature requires an OpenAI API Key; read https://github.com/eliranwong/UniqueBible/wiki/Semantic-Search ; " + traceback.format_exc()
return ("study", response, {})
# COUNT:::
def textCountSearch(self, command, source):
return self.textCount(command, config.addFavouriteToMultiRef)
# called by COUNT:::
def textCount(self, command, interlinear):
if command.count(":::") == 0:
command = "{0}:::{1}".format(config.mainText, command)
commandList = self.splitCommand(command)
texts, searchEntry = commandList
booksRange = ""
if searchEntry.count(":::") > 0:
searchEntry, booksRange = self.splitCommand(searchEntry)
texts = self.getConfirmedTexts(texts)
if texts and re.match("^[EHG][0-9]+?$", searchEntry):
return self.textConcordance(command, "study")
elif not len(commandList) == 2 or not texts:
return self.invalidCommand()
else:
biblesSqlite = BiblesSqlite()
searchResult = "<hr>".join([biblesSqlite.countSearchBible(text, searchEntry, interlinear, booksRange) for text in texts])
return ("study", searchResult, {})
# SEARCH:::
def textSearchBasic(self, command, source):
return self.textSearch(command, source, "BASIC", config.addFavouriteToMultiRef)
# SEARCHREFERECE:::
def textSearchReference(self, command, source):
return self.textSearch(command, source, "BASIC", config.addFavouriteToMultiRef, referenceOnly=True)
# REGEXSEARCH:::
def textSearchRegex(self, command, source):
return self.textSearch(command, source, "REGEX", config.addFavouriteToMultiRef)
# ADVANCEDSEARCH:::
def textSearchAdvanced(self, command, source):
return self.textSearch(command, source, "ADVANCED", config.addFavouriteToMultiRef)
# SEARCHOT:::
def textSearchOT(self, command, source):
commandList = command.split(":::")
commandList[-1] = 'Scripture LIKE "%{0}%" AND Book < 40'.format(commandList[-1])
command = ":::".join(commandList)
return self.textSearch(command, source, "ADVANCED", config.addFavouriteToMultiRef)
# SEARCHNT:::
def textSearchNT(self, command, source):
commandList = command.split(":::")
commandList[-1] = 'Scripture LIKE "%{0}%" AND Book >= 40 AND Book <= 66'.format(commandList[-1])
command = ":::".join(commandList)
return self.textSearch(command, source, "ADVANCED", config.addFavouriteToMultiRef)
# SEARCHSINGLE:::
def textSearchSingleBook(self, book, command, source):
commandList = command.split(":::")
commandList[-1] = 'Scripture LIKE "%{0}%" AND Book = {1}'.format(commandList[-1], book)
command = ":::".join(commandList)
return self.textSearch(command, source, "ADVANCED", config.addFavouriteToMultiRef)
# ANDSEARCH:::
def textAndSearch(self, command, source):
commandList = command.split(":::")
index = -2 if command.count(":::") == 2 else -1
commandList[index] = " AND ".join(['Scripture LIKE "%{0}%"'.format(m.strip()) for m in commandList[index].split("|")])
command = ":::".join(commandList)
return self.textSearch(command, source, "ADVANCED", config.addFavouriteToMultiRef)
# ORSEARCH:::
def textOrSearch(self, command, source):
commandList = command.split(":::")
index = -2 if command.count(":::") == 2 else -1
commandList[index] = " OR ".join(['Scripture LIKE "%{0}%"'.format(m.strip()) for m in commandList[index].split("|")])
command = ":::".join(commandList)
return self.textSearch(command, source, "ADVANCED", config.addFavouriteToMultiRef)
# called by SEARCH::: & ANDSEARCH::: & ORSEARCH::: & ADVANCEDSEARCH::: & REGEXSEARCH:::
def textSearch(self, command, source, mode, favouriteVersion=False, referenceOnly=False):
if command.count(":::") == 0:
command = "{0}:::{1}".format(config.mainText, command)
commandList = self.splitCommand(command)
texts = self.getConfirmedTexts(commandList[0], True)
if not texts:
texts = [config.mainText]
searchEntry = commandList[1] if texts[0] in commandList[0] else command
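        # if the first token is not a recognised bible text, fall back to config.mainText
        # and treat the whole command as the search string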
booksRange = ""
if searchEntry.count(":::") > 0:
searchEntry, booksRange = self.splitCommand(searchEntry)
if not texts:
return self.invalidCommand()
else:
biblesSqlite = BiblesSqlite()
searchResult = "<hr>".join([biblesSqlite.searchBible(text, mode, searchEntry, favouriteVersion, referenceOnly, booksRange) for text in texts])
return ("study", searchResult, {})
# SEARCHHIGHLIGHT:::
def highlightSearch(self, command, source):
if config.enableVerseHighlighting:
if command.count(":::") == 0:
command += ":::all"
code, reference = self.splitCommand(command)
highlight = Highlight()
verses = highlight.getHighlightedBcvList(code, reference)
bcv = [(b, c, v) for b, c, v, *_ in verses]
text = BiblesSqlite().readMultipleVerses(config.mainText, bcv)
text = highlight.highlightSearchResults(text, verses)
return ("study", text, {})
else:
return ("", "", {})
# WORD:::
def textWordData(self, command, source):
try:
book, wordId = self.splitCommand(command)
bNo = int(book)
morphologySqlite = MorphologySqlite()
bcvTuple, content = morphologySqlite.wordData(bNo, int(wordId))
# extra data for Greek words
if bNo >= 40:
wordData = WordData()
content += re.sub('^.*?<br><br><b><i>TBESG', '<b><i>TBESG', wordData.getContent("NT", wordId))
self.setStudyVerse(config.studyText, bcvTuple)
return ("study", content, {'tab_title': 'Mor:' + wordId})
except:
return self.invalidCommand()
# return default lexicons
def getDefaultLexicons(self):
return {
"H": config.defaultLexiconStrongH,
"G": config.defaultLexiconStrongG,
"E": config.defaultLexiconETCBC,
"L": config.defaultLexiconLXX,
"g": config.defaultLexiconGK,
"l": config.defaultLexiconLN,
}
# LEXICON:::
def textLexicon(self, command, source):
return self.textLexiconSearch(command, source, False)
# REVERSELEXICON:::
def textReverseLexicon(self, command, source):
return self.textLexiconSearch(command, source, True)
def textLexiconSearch(self, command, source, reverse):
if command.count(":::") == 0:
defaultLexicon = self.getDefaultLexicons()
command = "{0}:::{1}".format(defaultLexicon[command[0]], command)
module, entries = self.splitCommand(command)
if module == config.thisTranslation['searchAllLexicons']:
modules = LexiconData().lexiconList
showLexiconMenu = False
else:
modules = [module]
showLexiconMenu = True if not config.runMode == "terminal" else False
entries = entries.strip()
if config.useLiteVerseParsing and not config.noQt:
try:
if config.qtLibrary == "pyside6":
from PySide6.QtWidgets import QApplication
else:
from qtpy.QtWidgets import QApplication
QApplication.clipboard().setText(entries)
except:
pass
TextCommandParser.last_lexicon_entry = entries
content = ""
for module in modules:
config.lexicon = module
lexicon = Lexicon(module)
# Convert ETCBC Hebrew lexeme codes, if any, to Hebrew Strong's numbers
morphologySqlite = MorphologySqlite()
entriesSplit = entries.split("_")
entryList = []
for entry in entriesSplit:
config.eventEntry = entry
PluginEventHandler.handleEvent("lexicon_entry", entry)
entry = config.eventEntry
if not reverse and not module.startswith("Concordance") and not module == "Morphology" and entry.startswith("E"):
entryList += morphologySqlite.etcbcLexemeNo2StrongNo(entry)
else:
entryList.append(entry)
if reverse:
content += "<hr>".join([lexicon.getReverseContent(entry) for entry in entryList])
else:
content += "<hr>".join([lexicon.getContent(entry, showLexiconMenu) for entry in entryList])
if not content or content == "INVALID_COMMAND_ENTERED":
return self.invalidCommand()
else:
if config.runMode == "terminal":
if module == "ConcordanceBook":
def searchBookLink(match):
lexicalEntry = match.group(1)
bookAbb = match.group(2)
try:
bookNo = BibleBooks.name2number[bookAbb]
except:
bookNo = BibleBooks.name2number[f"{bookAbb}."]
return f"""<br>[<ref>MORPHOLOGY:::LexicalEntry LIKE '%{lexicalEntry},%' AND Book = {bookNo}</ref>]"""
p = re.compile("""\[<ref onclick="searchBook\('([^']+?)','([^']+?)'\)">search</ref>\]""")
content = p.sub(searchBookLink, content)
elif module == "ConcordanceMorphology":
def searchMorphologyLink(match):
lexicalEntry = match.group(1)
morphologyCode = match.group(2)
return f"""<br>[<ref>MORPHOLOGYCODE:::{lexicalEntry},{morphologyCode}</ref>]"""
p = re.compile("""\[<ref onclick="searchCode\('([^']+?)','([^']+?)'\)">search</ref>\]""")
content = p.sub(searchMorphologyLink, content)
def morphologyDescription(match):
morphologyModule = match.group(1)
if morphologyModule.endswith("morph"):
morphologyModule = morphologyModule[:-5]
morphologyModule = morphologyModule.upper()
                    morphologyCode = match.group(2)
                    return f"<u><b>{morphologyCode}</b></u><br>[<ref>SEARCHTOOL:::m{morphologyModule}:::{morphologyCode}</ref>]"
p = re.compile("""<u><b><ref onclick="(rmac|etcbcmorph|lxxmorph)\('([^']+?)'\)">[^<>]*?</ref></b></u>""")
content = p.sub(morphologyDescription, content)
title = "RevLex" if reverse else "Lex"
return ("study", content, {'tab_title': title + ':' + module + ':' + entries})
# SEARCHLEXICON:::
def searchLexicon(self, command, source):
if command.count(":::") == 0:
defaultLexicon = self.getDefaultLexicons()
command = "{0}:::{1}".format(defaultLexicon[command[0]], command)
moduleList, search = self.splitCommand(command)
search = search.strip()
TextCommandParser.last_lexicon_entry = search
if moduleList == config.thisTranslation["all"]:
modules = LexiconData().getLexiconList()
else:
modules = moduleList.split("_")
content = ""
for module in modules:
config.lexicon = module
lexicon = Lexicon(module)
content += lexicon.searchTopic(search)
if not content or content == "INVALID_COMMAND_ENTERED":
return self.invalidCommand()
else:
return ("study", content, {'tab_title': 'SearchLex:' + module + ':' + search})
# LMCOMBO:::
def textLMcombo(self, command, source):
if command.count(":::") == 2:
lexicalEntry, morphologyModule, morphologyCode = command.split(":::")
defaultLexicon = self.getDefaultLexicons()[lexicalEntry[0]]
return self.getLexiconMorphologyContent(defaultLexicon, lexicalEntry, morphologyModule, morphologyCode)
elif command.count(":::") == 3:
lexicon, lexicalEntry, morphologyModule, morphologyCode = command.split(":::")
return self.getLexiconMorphologyContent(lexicon, lexicalEntry, morphologyModule, morphologyCode)
else:
return self.invalidCommand()
def getLexiconMorphologyContent(self, lexicon, lexicalEntry, morphologyModule, morphologyCode):
lexicon = Lexicon(lexicon)
lexiconContent = "<hr>".join([lexicon.getContent(entry) for entry in lexicalEntry.split("_")])
searchSqlite = SearchSqlite()
morphologyDescription = "<hr>".join([searchSqlite.getContent("m"+morphologyModule.upper(), code) for code in morphologyCode.split("_")])
return ("study", "{0}<hr>{1}".format(morphologyDescription, lexiconContent), {})
# _wordnote:::
def textWordNote(self, command, source):
if re.search("^(LXX1|LXX2|LXX1i|LXX2i|SBLGNT|SBLGNTl):::", command):
module, wordID = self.splitCommand(command)
bibleSqlite = Bible(module)
data = bibleSqlite.readWordNote(wordID)
if data:
return ("study", data, {})
else:
return self.invalidCommand()
else:
return self.invalidCommand()
# LEMMA:::
def textLemma(self, command, source):
return self.textMorphologyFeature(command, source, "LEMMA")
# MORPHOLOGYCODE:::
def textMorphologyCode(self, command, source):
return self.textMorphologyFeature(command, source, "MORPHOLOGYCODE")
# MORPHOLOGY:::
def textMorphology(self, command, source):
return self.textMorphologyFeature(command, source, "ADVANCED")
# SEARCHMORPHOLOGY:::
def textSearchMorphology(self, command, source):
#LexicalEntry LIKE '%E70746,%' AND
if not command.count(":::") == 1:
return self.invalidCommand("study")
else:
lexicalEntry, morphology = command.split(":::")
lexicalEntry = "LexicalEntry LIKE '%{0},%'".format(lexicalEntry)
morphology = " OR ".join(['Morphology LIKE "%{0}%"'.format(m.strip()) for m in morphology.split("|")])
command = "{0} AND ({1})".format(lexicalEntry, morphology)
return self.textMorphologyFeature(command, source, "ADVANCED")
# called by LEMMA::: & MORPHOLOGYCODE::: & MORPHOLOGY::: & # SEARCHMORPHOLOGY:::
def textMorphologyFeature(self, command, source, mode):
morphologySqlite = MorphologySqlite()
searchResult = morphologySqlite.searchMorphology(mode, command)
return ("study", searchResult, {})
# _searchword:::
def textSearchWord(self, command, source):
portion, wordID = self.splitCommand(command)
morphologySqlite = MorphologySqlite()
lexeme, lexicalEntry, morphologyString = morphologySqlite.searchWord(portion, wordID)
lexicalEntry = lexicalEntry.split(",")[0]
translations = morphologySqlite.distinctMorphology(lexicalEntry)
items = (lexeme, lexicalEntry, morphologyString, translations)
self.parent.openMorphDialog(items)
return ("", "", {})
# SEARCHMORPHOLOGYBYLEX:::
def searchMorphologyByLex(self, command, source):
return self.searchMorphologyCommon(command, source, "LEX")
# SEARCHMORPHOLOGYBYWORD:::
def searchMorphologyByWord(self, command, source):
return self.searchMorphologyCommon(command, source, "WORD")
# SEARCHMORPHOLOGYBYGLOSS:::
def searchMorphologyByGloss(self, command, source):
return self.searchMorphologyCommon(command, source, "GLOSS")
def searchMorphologyCommon(self, command, source, mode):
commands = command.split(":::")
searchTerm = commands[0]
morphology = commands[1]
startBook = 1
endBook = 66
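        # an optional third part restricts the search to a book range, e.g. "1-39", or to a single book number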
        if len(commands) > 2:
            bookRange = commands[2]
            if "-" in bookRange:
                startBook, endBook = bookRange.split("-")
            else:
                startBook = bookRange
                endBook = bookRange
morphologyList = morphology.split(",")
morphologySqlist = MorphologySqlite()
if mode == "LEX":
searchTerm += ","
records = morphologySqlist.searchByLexicalAndMorphology(startBook, endBook, searchTerm, morphologyList)
elif mode == "WORD":
records = morphologySqlist.searchByWordAndMorphology(startBook, endBook, searchTerm, morphologyList)
elif mode == "GLOSS":
records = morphologySqlist.searchByGlossAndMorphology(startBook, endBook, searchTerm, morphologyList)
fontStart = ""
fontEnd = ""
if len(records) > 0:
b = records[0][2]
if b < 40:
fontStart = "<heb>"
fontEnd = "</heb>"
else:
fontStart = "<grk>"
fontEnd = "</grk>"
formatedText = "<p>{3}{0}{4}:::{1} <b style='color: brown;'>{2}</b> hits</p>".format(
searchTerm, morphology, len(records), fontStart, fontEnd)
ohgbiInstalled = os.path.isfile(os.path.join(config.marvelData, "bibles", "OHGBi.bible"))
if config.addOHGBiToMorphologySearch and ohgbiInstalled:
ohgbiBible = Bible("OHGBi")
for index, word in enumerate(records):
            wordID, clauseID, b, c, v, textWord, lexicalEntry, morphologyCode, morphology, lexeme, transliteration, pronunciation, interlinear, translation, gloss = word
firstLexicalEntry = lexicalEntry.split(",")[0]
textWord = "<{3} onclick='w({1},{2})' onmouseover='iw({1},{2})'>{0}</{3}>".format(textWord, b, wordID, "heb" if b < 40 else "grk")
formatedText += "<div><span style='color: purple;'>({0}{1}</ref>)</span> {2} <ref onclick='searchCode(\"{4}\", \"{3}\")'>{3}</ref>".format(morphologySqlist.formVerseTag(b, c, v, config.mainText), morphologySqlist.bcvToVerseReference(b, c, v), textWord, morphologyCode, firstLexicalEntry)
if config.addOHGBiToMorphologySearch and ohgbiInstalled:
formatedText += ohgbiBible.getHighlightedOHGBVerse(b, c, v, wordID, False, index + 1 > config.maximumOHGBiVersesDisplayedInSearchResult)
formatedText += "<br></div>"
return ("study", formatedText, {})
# _setconfig:::
def textSetConfig(self, command, source):
try:
# when argument is empty
if not command:
# key only without value
content = "<h2>Configurable Settings</h2>"
content += pprint.pformat(list(config.help.keys()))
return ("study", content, {})
elements = self.splitCommand(command)
if len(elements) == 1:
key = elements[0]
content = f"<h2>{key}</h2>"
content += f"<ref>Description:</ref><br>{config.help[key]}<br>"
content += f"<ref>Current value</ref>: {(eval('pprint.pformat(config.'+key+')'))}<br>"
typeString = type(eval(f"config.{key}")).__name__
content += f"<ref>Type</ref>: {typeString}"
return ("study", content, {})
if config.developer or config.webFullAccess:
item, value = self.splitCommand(command)
if not item in config.help.keys():
return self.invalidCommand("study")
else:
# use """ to allow using ' or " for string
newConfig = """{0} = {1}""".format(item, value)
exec("config."+newConfig)
message = f"The value of config.{item} is now changed to {newConfig}."
if config.runMode == "terminal":
print(message)
return ("study", ".restart", {})
return ("study", message, {})
else:
return self.invalidCommand("study")
except:
return self.invalidCommand("study")
# EXLB:::
def textExlb(self, command, source):
commandList = self.splitCommand(command)
if commandList and len(commandList) == 2:
module, *_ = commandList
if module in ["exlbl", "exlbp", "exlbt"]:
if module == "exlbt":
config.topic = "EXLBT"
config.topicEntry = commandList[1]
elif module == "exlbp":
config.characterEntry = commandList[1]
elif module == "exlbl":
config.locationEntry = commandList[1]
exlbData = ExlbData()
content = exlbData.getContent(commandList[0], commandList[1])
if config.runMode == "terminal" and module == "exlbl":
#<p align="center">[<ref onclick="website('https://maps.google.com/?q=31.777444,35.234935&ll=31.777444,35.234935&z=10')">Click HERE for a Live Google Map</ref>]</p>
content = re.sub("""<p align="center">\[<ref onclick="website\('(.*?)'\)">Click HERE for a Live Google Map</ref>\]</p>""", r"[<ref>\1</ref> ]", content)
if config.theme in ("dark", "night"):
content = self.adjustDarkThemeColorsForExl(content)
return ("study", content, {})
else:
return self.invalidCommand("study")
else:
return self.invalidCommand("study")
# CLAUSE:::
def textClause(self, command, source):
if command.count(":::") == 1:
bcv, entry = self.splitCommand(command)
b, c, v = [int(i) for i in bcv.split(".")]
clauseData = ClauseData()
if b < 40:
testament = "OT"
else:
testament = "NT"
content = "<h2>Clause id: c{0}</h2>{1}".format(entry, clauseData.getContent(testament, entry))
self.setStudyVerse(config.studyText, (b, c, v))
return ("study", content, {})
else:
return self.invalidCommand("study")
# DICTIONARY:::
def textDictionary(self, command, source):
indexes = IndexesSqlite()
dictionaryList = dict(indexes.dictionaryList).keys()
module = command[:3]
if module in dictionaryList:
if not module == "HBN":
config.dictionary = module
dictionaryData = DictionaryData()
content = dictionaryData.getContent(command)
config.dictionaryEntry = command
return ("study", content, {})
else:
return self.invalidCommand("study")
# ENCYCLOPEDIA:::
def textEncyclopedia(self, command, source):
commandList = self.splitCommand(command)
if commandList and len(commandList) == 2:
module, entry = commandList
indexes = IndexesSqlite()
encyclopediaList = dict(indexes.encyclopediaList).keys()
if module in encyclopediaList:
config.encyclopedia = module
encyclopediaData = EncyclopediaData()
content = encyclopediaData.getContent(module, entry)
return ("study", content, {})
else:
return self.invalidCommand("study")
else:
return self.invalidCommand("study")
# BOOK:::
def textBook(self, command, source):
bookData = BookData()
#bookList = [book for book, *_ in bookData.getCatalogBookList()]
if command.count(":::") == 0:
# if command.count(":::") == 0 and command in bookList:
config.book = command
self.parent.updateBookButton()
return ("study", bookData.getMenu(module=config.book), {'tab_title': command[:20]})
commandList = self.splitCommand(command)
if commandList and len(commandList) == 2:
module, entry = commandList
anchor = None
if '#' in entry:
parts = re.split("#", entry)
entry = parts[0]
anchor = parts[1]
content = bookData.getContent(module, entry)
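            # content is treated as a PDF when it is a bytes object beginning with "%PD" (37, 80, 68)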
isPDF = True if type(content) == bytes and content[0] == 37 and content[1] == 80 and content[2] == 68 else False
pdfFilename = None
if isPDF:
pdfFilename = entry
if not content:
return self.invalidCommand("study")
else:
if not isPDF and config.theme in ("dark", "night"):
content = self.adjustDarkThemeColorsForExternalBook(content)
if config.openBookInNewWindow:
self.parent.updateBookButton()
return ("popover.study", content, {'tab_title': module[:20], 'pdf_filename': pdfFilename})
else:
self.parent.updateBookButton()
return ("study", content, {'tab_title': module[:20], 'jump_to': anchor, 'pdf_filename': pdfFilename})
else:
return self.invalidCommand("study")
# SEARCHBOOKCHAPTER:::
def textSearchBookChapter(self, command, source):
return self.textSearchBook(command, source, chapterOnly=True)
# SEARCHBOOK:::
def textSearchBook(self, command, source, chapterOnly=False):
bookData = BookData()
if command.count(":::") == 0:
command = "{0}:::{1}".format(config.book, command)
modules, searchString = self.splitCommand(command)
if modules == "ALL":
modules = ",".join(CatalogUtil.getBooks())
elif modules == "FAV":
modules = ",".join(config.favouriteBooks)
if not config.book in config.favouriteBooks:
modules = "{0},{1}".format(config.book, modules)
if not searchString:
return self.invalidCommand("study")
else:
config.bookSearchString = searchString
modules = modules.split(",")
content = "<hr>".join([bookData.getSearchedMenu(module, searchString, chapterOnly=chapterOnly) for module in modules])
if not content:
return ("study", config.thisTranslation["search_notFound"], {})
#return self.invalidCommand("study")
else:
self.parent.updateBookButton()
return ("study", content, {})
# SEARCHALLBOOKSPDF:::
def textSearchAllBooksAndPdf(self, command, source):
view, content1, *_ = self.textSearchBook("ALL:::{0}".format(command), source)
view, content2, *_ = self.searchPdf(command, source)
return ("study", content1+content2, {'tab_title': "Books/PDF"})
# SEARCHJOURNAL:::
def searchJournalNote(self, command, source):
config.noteSearchString = command
noteSqlite = JournalSqlite()
days = noteSqlite.getSearchJournalList(command)
days = [f"{y}-{m}-{d}" for y, m, d in days]
prefix = "[MESSAGE]" if config.runMode == "terminal" else ""
return ("study", "{3}<p>\"<b style='color: brown;'>{0}</b>\" is found in <b style='color: brown;'>{1}</b> note(s) on book(s)</p><p>{2}</p>".format(command, len(days), "; ".join(days), prefix), {})
# SEARCHBOOKNOTE:::
def textSearchBookNote(self, command, source):
config.noteSearchString = command
noteSqlite = NoteSqlite()
books = noteSqlite.getSearchedBookList(command)
prefix = "[MESSAGE]" if config.runMode == "terminal" else ""
return ("study", "{3}<p>\"<b style='color: brown;'>{0}</b>\" is found in <b style='color: brown;'>{1}</b> note(s) on book(s)</p><p>{2}</p>".format(command, len(books), "; ".join(books), prefix), {})
# SEARCHCHAPTERNOTE:::
def textSearchChapterNote(self, command, source):
config.noteSearchString = command
noteSqlite = NoteSqlite()
chapters = noteSqlite.getSearchedChapterList(command)
prefix = "[MESSAGE]" if config.runMode == "terminal" else ""
return ("study", "{3}<p>\"<b style='color: brown;'>{0}</b>\" is found in <b style='color: brown;'>{1}</b> note(s) on chapter(s)</p><p>{2}</p>".format(command, len(chapters), "; ".join(chapters), prefix), {})
# SEARCHVERSENOTE:::
def textSearchVerseNote(self, command, source):
config.noteSearchString = command
noteSqlite = NoteSqlite()
verses = noteSqlite.getSearchedVerseList(command)
prefix = "[MESSAGE]" if config.runMode == "terminal" else ""
return ("study", "{3}<p>\"<b style='color: brown;'>{0}</b>\" is found in <b style='color: brown;'>{1}</b> note(s) on verse(s)</p><p>{2}</p>".format(command, len(verses), "; ".join(verses), prefix), {})
# DAY:::
def getDayEntry(self, entry):
dayEntry = allDays[int(entry)][-1]
if config.runMode == "terminal":
print(f"Scripture: {dayEntry}")
dayEntry = dayEntry.split(", ")
parser = BibleVerseParser(config.parserStandarisation)
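        # expand chapter-only references (those without ":"), e.g. a whole-chapter reading such as
        # "Gen. 1", into full-chapter ranges ending at the chapter's last verse in the current main text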
for index, reference in enumerate(dayEntry):
if not ":" in reference:
b, c, *_ = parser.extractAllReferences(reference)[0]
lastVerse = Bible(config.mainText).getLastVerse(b, c)
fullReference = parser.bcvToVerseReference(b, c, 1, c, lastVerse)
dayEntry[index] = fullReference
return ", ".join(dayEntry)
def textDay(self, command, source):
try:
if command.count(":::") == 0:
if config.enableHttpServer and config.webHomePage == "traditional.html":
config.mainText = "CUV"
command = "CUV:::{0}".format(command)
elif config.enableHttpServer and config.webHomePage == "simplified.html":
config.mainText = "CUVs"
command = "CUVs:::{0}".format(command)
else:
command = "{0}:::{1}".format(config.mainText, command)
commandPrefix, entry, *_ = command.split(":::")
dayEntry = self.getDayEntry(entry)
command = "{0}:::{1}".format(commandPrefix, dayEntry)
return self.textBible(command, source)
except:
return self.invalidCommand("study")
# DAYAUDIO:::
def textDayAudio(self, command, source):
try:
if command.count(":::") == 0:
if config.enableHttpServer and config.webHomePage == "traditional.html":
config.mainText = "CUV"
command = "CUV:::{0}".format(command)
elif config.enableHttpServer and config.webHomePage == "simplified.html":
config.mainText = "CUVs"
command = "CUVs:::{0}".format(command)
else:
command = "{0}:::{1}".format(config.mainText, command)
commandPrefix, entry, *_ = command.split(":::")
dayEntry = self.getDayEntry(entry)
command = "{0}:::{1}".format(commandPrefix, dayEntry)
return self.textRead(command, source)
except:
return self.invalidCommand("study")
# DAYAUDIOPLUS:::
def textDayAudioPlus(self, command, source):
try:
if command.count(":::") == 0:
if config.enableHttpServer and config.webHomePage == "traditional.html":
config.mainText = "CUV"
elif config.enableHttpServer and config.webHomePage == "simplified.html":
config.mainText = "CUVs"
biblesSqlite = BiblesSqlite()
favBible1 = biblesSqlite.getFavouriteBible()
favBible2 = biblesSqlite.getFavouriteBible2()
favBible3 = biblesSqlite.getFavouriteBible3()
plusBible = ""
if not config.mainText == favBible1:
plusBible = favBible1
elif not config.mainText == favBible2:
plusBible = favBible2
elif not config.mainText == favBible3:
plusBible = favBible3
command = "{1}_{2}:::{0}".format(command, config.mainText, plusBible)
commandPrefix, entry, *_ = command.split(":::")
dayEntry = self.getDayEntry(entry)
command = "{0}:::{1}".format(commandPrefix, dayEntry)
return self.textRead(command, source)
except:
return self.invalidCommand("study")
# DATA:::
def textData(self, command, source):
config.dataset = command
filepath = os.path.join("plugins", "menu", "Bible Data", "{0}.txt".format(command))
if not os.path.isfile(filepath) or not ("Tabulate" in config.enabled):
return self.invalidCommand("study")
with open(filepath, 'r', encoding='utf8') as fileObj:
dataList = fileObj.read().split("\n")
table = []
headers = [command]
#parser = BibleVerseParser(config.parserStandarisation)
for text in dataList:
# Remove CLRF linebreak
text = re.sub("\r", "", text)
#text = parser.parseText(text)
table.append([text])
from tabulate import tabulate
html = tabulate(table, headers, tablefmt="html")
html = BibleVerseParser(config.parserStandarisation).parseText(html)
return ("study", html, {'tab_title': "Data"})
def getLocationsFromReference(self, reference):
if reference:
combinedLocations = []
indexesSqlite = IndexesSqlite()
if len(reference) == 5:
b, c, v, ce, ve = reference
if c == ce:
if v == ve:
combinedLocations += indexesSqlite.getVerseLocations(b, c, v)
elif ve > v:
combinedLocations += indexesSqlite.getChapterLocations(b, c, startV=v, endV=ve)
elif ce > c:
combinedLocations += indexesSqlite.getChapterLocations(b, c, startV=v)
combinedLocations += indexesSqlite.getChapterLocations(b, ce, endV=ve)
if (ce - c) > 1:
for i in range(c+1, ce):
combinedLocations += indexesSqlite.getChapterLocations(b, i)
else:
b, c, v, *_ = reference
combinedLocations += indexesSqlite.getVerseLocations(b, c, v)
return combinedLocations
else:
return []
# MAP:::
def textMap(self, command, source):
verseList = self.extractAllVerses(command)
if not verseList or not ("Gmplot" in config.enabled):
return self.invalidCommand()
else:
combinedLocations = []
#reference = verseList[0]
#combinedLocations = self.getLocationsFromReference(reference)
for reference in verseList:
combinedLocations += self.getLocationsFromReference(reference)
try:
selectedLocations = self.selectLocations(combinedLocations)
html = self.displayMap(selectedLocations)
return ("study", html, {'tab_title': "Map"})
except:
return self.invalidCommand()
# LOCATIONS:::
def textLocations(self, command, source):
selectedLocations = command.split("|")
selectedLocations = self.selectLocations(defaultChecklist=[i[2:] for i in selectedLocations])
html = self.displayMap(selectedLocations)
return ("study", html, {'tab_title': "Map"})
def selectLocations(self, locations=[], defaultChecklist=[]):
if defaultChecklist:
checkList = defaultChecklist
else:
checkList = []
for location in locations:
# e.g. <p><ref onclick="exlbl('BL1163')">Hiddekel</ref> ... <ref onclick="exlbl('BL421')">Euphrates</ref></p>
searchPattern = "exlbl\('BL([0-9]+?)'\)"
found = re.findall(searchPattern, location[0])
if found:
for entry in found:
checkList.append(entry)
checkList = [int(item) for item in checkList]
checkList = list(set(checkList))
#checkList.sort()
formattedList = []
for num in checkList:
exlbl_entry = "BL{0}".format(num)
if exlbl_entry in self.locationMap:
formattedList.append("{0}. {1}".format(num, self.locationMap[exlbl_entry][1]))
formattedList = list(set(formattedList))
# e.g. For Acts 15:36-18:22, formattedList = ['1160. Thyatira', '76. Apollonia', '87. Areopagus/Mars Hill', '590. Iconium', '636. Jerusalem', '880. Neapolis', '108. Asia/Achaia', '118. Athens', '16. Achaia', '431. Galatia', '956. Pontus', '918. Pamphylia', '1025. Samothracia', '266. Caesarea', '281. Cenchrea', '177. Berea', '1182. Troas', '1122. Syria/Syrians', '1158. Thessalonica', '400. Ephesus', '1013. Rome', '601. Italy', '68. Antioch', '865. Mysia', '747. Macedonia', '330. Derbe', '250. Bithynia', '306. Corinth', '59. Amphipolis', '742. Lystra', '946. Phrygia', '316. Chittim/Cyprus', '300. Cilicia', '942. Philippi']
return formattedList
def displayMap(self, selectedLocations, browser=False):
import gmplot
gmap = gmplot.GoogleMapPlotter(33.877444, 34.234935, 6, map_type='hybrid')
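        # the map is centred roughly over the eastern Mediterranean; markers for the selected
        # EXLBL locations (or Jerusalem as a fallback) are added below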
if config.myGoogleApiKey:
gmap.apikey = config.myGoogleApiKey
if selectedLocations:
for item in selectedLocations:
try:
num = int(re.sub("\..*?$", "", item))
exlbl_entry = "BL{0}".format(num)
label, name, latitude, longitude = self.locationMap[exlbl_entry]
if config.standardAbbreviation == "TC" and exlbl_entry in tc_location_names and tc_location_names[exlbl_entry]:
name = tc_location_names[exlbl_entry]
elif config.standardAbbreviation == "SC" and exlbl_entry in sc_location_names and sc_location_names[exlbl_entry]:
name = sc_location_names[exlbl_entry]
googleEarthLink = "https://earth.google.com/web/search/{0},+{1}".format(str(latitude).replace(".", "%2e"), str(longitude).replace(".", "%2e"))
if browser:
weblink = self.getWeblink(f"EXLB:::exlbl:::{exlbl_entry}", filterCommand=False)
info = "<a href='{0}' target='_blank'>{1}</a> [<a href='{2}' target='_blank'>3D</a>]".format(weblink, name, googleEarthLink)
elif config.enableHttpServer:
info = """<a href="#" onclick="document.title = 'EXLB:::exlbl:::{0}';">{1}</a> [<a href='{2}' target='_blank'>3D</a>]""".format(exlbl_entry, name, googleEarthLink)
else:
info = """<a href="#" onclick="document.title = 'EXLB:::exlbl:::{0}';">{1}</a> [<a href="#" onclick="document.title = 'online:::{2}';">3D</a>]""".format(exlbl_entry, name, googleEarthLink)
gmap.marker(latitude, longitude, label=label, title=name, info_window=info)
except:
pass
else:
googleEarthLink = r"https://earth.google.com/web/search/31%2e777444,+35%2e234935"
if browser:
weblink = self.getWeblink("EXLB:::exlbl:::BL636", filterCommand=False)
info = "<a href='{0}' target='_blank'>Jerusalem</a> [<a href='{1}' target='_blank'>3D</a>]".format(weblink, googleEarthLink)
elif config.enableHttpServer:
info = """<ref onclick="document.title = 'EXLB:::exlbl:::BL636';">Jerusalem</ref> [<a href='{0}' target='_blank'>3D</a>]""".format(googleEarthLink)
else:
info = """<ref onclick="document.title = 'EXLB:::exlbl:::BL636';">Jerusalem</ref> [<ref onclick="document.title = 'online:::{0}';">3D</ref>]""".format(googleEarthLink)
gmap.marker(31.777444, 35.234935, label="J", title="Jerusalem", info_window=info)
# HTML text
return gmap.get()
def getWeblink(self, command="", filterCommand=True):
if config.runMode == "terminal":
server = "http://localhost:8080"
if not config.mainWindow.isUrlAlive(server):
server = ""
else:
server = ""
return TextUtil.getWeblink(config.mainWindow.getCommand(command) if config.runMode == "terminal" and filterCommand else command, server=server)
# CROSSREFERENCE:::
def textCrossReference(self, command, source):
if command.count(":::") == 1:
file, verses = self.splitCommand(command)
files = [file]
else:
verses = command
files = [None]
for file in glob.glob(config.marvelData + "/xref/*.xref"):
files.append(os.path.basename(file).replace(".xref", ""))
verseList = self.extractAllVerses(verses)
if not verseList:
return self.invalidCommand()
biblesSqlite = BiblesSqlite()
content = ""
for file in files:
crossReferenceSqlite = CrossReferenceSqlite(file)
xrefFile = ""
if file is not None:
xrefFile = " ({0})".format(file)
for verse in verseList:
content += "<h2>Cross-reference{1}: <ref onclick='document.title=\"{0}\"'>{0}</ref></h2>".format(biblesSqlite.bcvToVerseReference(*verse), xrefFile)
crossReferenceList = self.extractAllVerses(crossReferenceSqlite.getCrossReferenceList(verse))
if crossReferenceList:
crossReferenceList.insert(0, tuple(verse))
content += biblesSqlite.readMultipleVerses(config.mainText, crossReferenceList)
content += "<hr>"
self.setStudyVerse(config.studyText, verseList[-1])
return ("study", content, {})
# TSKE:::
def tske(self, command, source):
verseList = self.extractAllVerses(command)
if not verseList:
return self.invalidCommand()
else:
biblesSqlite = BiblesSqlite()
crossReferenceSqlite = CrossReferenceSqlite()
content = ""
for verse in verseList:
content += "<h2>TSKE: <ref id='v{0}.{1}.{2}' onclick='document.title=\"{3}\"'>{3}</ref></h2>".format(*verse[:3], biblesSqlite.bcvToVerseReference(*verse))
tskeContent = crossReferenceSqlite.tske(verse)
content += "<div style='margin: 10px; padding: 0px 10px; border: 1px solid gray; border-radius: 5px;'>{0}</div>".format(tskeContent)
crossReferenceList = self.extractAllVerses(tskeContent)
if not crossReferenceList:
content += "[No cross-reference is found for this verse!]"
else:
crossReferenceList.insert(0, tuple(verse))
content += biblesSqlite.readMultipleVerses(config.mainText, crossReferenceList)
content += "<hr>"
self.setStudyVerse(config.studyText, verseList[-1])
return ("study", content, {})
# COMBO:::
def textCombo(self, command, source):
return ("study", "".join([self.textVerseData(command, source, feature) for feature in ("translation", "discourse", "words")]), {})
# TRANSLATION:::
def textTranslation(self, command, source):
return ("study", self.textVerseData(command, source, "translation"), {})
# DISCOURSE:::
def textDiscourse(self, command, source):
return ("study", self.textVerseData(command, source, "discourse"), {})
# WORDS:::
def textWords(self, command, source):
return ("study", self.textVerseData(command, source, "words"), {})
# called by TRANSLATION::: & WORDS::: & DISCOURSE::: & COMBO:::
def textVerseData(self, command, source, filename):
try:
verseList = self.extractAllVerses(command)
if not verseList:
return self.invalidCommand()
else:
biblesSqlite = BiblesSqlite()
verseData = VerseData(filename)
feature = "{0}{1}".format(filename[0].upper(), filename[1:])
#content = "<hr>".join(["<h2>{0}: <ref onclick='document.title=\"{1}\"'>{1}</ref></h2>{2}".format(feature, biblesSqlite.bcvToVerseReference(b, c, v), verseData.getContent((b, c, v))) for b, c, v in verseList])
contentList = []
for b, c, v in verseList:
subContent = "<h2>{0}: <ref onclick='document.title=\"{1}\"'>{1}</ref></h2>{2}".format(feature, biblesSqlite.bcvToVerseReference(b, c, v), verseData.getContent((b, c, v)))
if filename == "discourse":
subContent = re.sub("(<pm>|</pm>|<n>|</n>)", "", subContent)
if b < 40:
subContent = re.sub("""(<heb id="wh)([0-9]+?)("[^<>]*?>[^<>]+?</heb>[ ]*)""", r"""\1\2\3 <ref onclick="document.title='READWORD:::BHS5.{0}.{1}.{2}.\2'">{3}</ref>""".format(b, c, v, config.audioBibleIcon), subContent)
else:
subContent = re.sub("""(<grk id="w[0]*?)([1-9]+[0-9]*?)("[^<>]*?>[^<>]+?</grk>[ ]*)""", r"""\1\2\3 <ref onclick="document.title='READWORD:::OGNT.{0}.{1}.{2}.\2'">{3}</ref>""".format(b, c, v, config.audioBibleIcon), subContent)
if filename == "words":
if b < 40:
subContent = re.sub("""(<ref onclick="document.title=')READWORD(.*?)(<tlit>[^<>]*?</tlit><br><hlr><heb>[^<>]+?</heb>)""", r"\1READWORD\2\3 \1READLEXEME\2", subContent)
else:
subContent = re.sub("""(<ref onclick="document.title=')READWORD(.*?)(<tlit>[^<>]*?</tlit><br><hlr><grk>[^<>]+?</grk>)""", r"\1READWORD\2\3 \1READLEXEME\2", subContent)
contentList.append(subContent)
content = "<hr>".join(contentList)
self.setStudyVerse(config.studyText, verseList[-1])
return content
except:
return self.invalidCommand()
# INDEX:::
def textIndex(self, command, source):
verseList = self.extractAllVerses(command)
if not verseList:
return self.invalidCommand()
else:
parser = BibleVerseParser(config.parserStandarisation)
indexesSqlite = IndexesSqlite()
content = ""
for verse in verseList:
b, c, v = verse
content += "<h2>{0} - <ref onclick='document.title=\"{1}\"'>{1}</ref></h2>{2}<hr>".format(config.thisTranslation["menu4_indexes"], parser.bcvToVerseReference(b, c, v), indexesSqlite.getAllIndexes(verse))
self.setStudyVerse(config.studyText, verseList[-1])
return ("study", content, {})
# CHAPTERINDEX:::
def textChapterIndex(self, command, source):
verseList = self.extractAllVerses(command)
if not verseList:
return self.invalidCommand()
else:
parser = BibleVerseParser(config.parserStandarisation)
indexesSqlite = IndexesSqlite()
content = ""
for verse in verseList:
b, c, v = verse
content += "<h2>Indexes: <ref onclick='document.title=\"{0}\"'>{0}</ref></h2>{1}<hr>".format(parser.bcvToVerseReference(b, c, v, isChapter=True), indexesSqlite.getChapterIndexes(verse[:2]))
self.setStudyVerse(config.studyText, verseList[-1])
return ("study", content, {})
# SEARCHTHIRDDICTIONARY:::
def thirdDictionarySearch(self, command, source):
if command.count(":::") == 0:
command = "{0}:::{1}".format(config.thirdDictionary, command)
module, entry = self.splitCommand(command)
if module == config.thisTranslation['searchAllDictionaries']:
modules = self.parent.thirdPartyDictionaryList
showMenu = False
else:
modules = [module]
showMenu = False if config.runMode == "terminal" else True
content = ""
for module in modules:
module = self.parent.isThridPartyDictionary(module)
if module:
thirdPartyDictionary = ThirdPartyDictionary(module)
if entry:
content += thirdPartyDictionary.search(entry, showMenu)
elif not entry:
allTopics = [f"<ref>{i[0]}</ref>" if config.runMode == "terminal" else f"""<ref onclick="document.title='THIRDDICTIONARY:::{module[0]}:::{i[0]}'">{i[0]}</ref>""" for i in thirdPartyDictionary.getAllEntries()]
content += "<br>".join(allTopics)
if len(content) > 0:
return ("study", content, {})
else:
return self.invalidCommand("study")
# THIRDDICTIONARY:::
def thirdDictionaryOpen(self, command, source):
if command.count(":::") == 0:
command = "{0}:::{1}".format(config.thirdDictionary, command)
module, entry = self.splitCommand(command)
module = self.parent.isThridPartyDictionary(module)
if not entry or not module:
return self.invalidCommand("study")
else:
thirdPartyDictionary = ThirdPartyDictionary(module)
content = thirdPartyDictionary.getData(entry)
config.thirdDictionaryEntry = entry
return ("study", f"[MESSAGE]{content}" if config.runMode == "terminal" else content, {})
# _HIGHLIGHT:::
def highlightVerse(self, command, source):
hl = Highlight()
if command.count(":::") == 0:
command = "delete:::" + command.strip()
code, reference = self.splitCommand(command)
verseList = self.extractAllVerses(reference)
for b, c, v in verseList:
if code == "delete":
hl.removeHighlight(b, c, v)
else:
hl.highlightVerse(b, c, v, code)
return ("", "", {})
def adjustDarkThemeColorsForExl(self, content):
content = content.replace("#FFFFFF", "#555555")
content = content.replace("#DFDFDF", "gray")
content = content.replace('color="navy"', 'color="#609b00"')
return content
def adjustDarkThemeColorsForExternalBook(self, content):
content = content.replace("background-color:#FFFFFF", "background-color:#323232")
return content
# PDF:::
def openPdfReader(self, command, source):
if command.count(":::") == 0:
command += ":::1"
pdfFile, page = self.splitCommand(command)
if source == "http":
return self.parent.openPdfReader(pdfFile, page)
else:
self.parent.openPdfReader(pdfFile, page)
return ("", "", {})
# PDFFIND:::
def openPdfReaderFind(self, command, source):
pdfFile, find = self.splitCommand(command)
if source == "http":
return self.parent.openPdfReader(pdfFile, 0, False, False, find)
else:
self.parent.openPdfReader(pdfFile, 0, False, False, find)
return ("", "", {})
# SEARCHPDF:::
def searchPdf(self, command, source):
content = "<h2>Search PDF for <span style='color: brown;'>{0}</span></h2>".format(command)
for file in glob.glob(r"{0}/pdf/*.pdf".format(config.marvelData)):
with open(file, 'rb') as f:
datafile = f.readlines()
for line in datafile:
try:
if command in line.decode("utf-8"):
basename = os.path.basename(file)
content += """<ref onclick='document.title="PDFFIND:::{0}:::{1}"'>{0}</ref><br>""".format(basename, command)
break
except Exception as e:
pass
return ("study", content, {'tab_title': "PDF search"})
# ANYPDF:::
def openPdfReaderFullpath(self, command, source):
if command.count(":::") == 0:
command += ":::1"
pdfFile, page = self.splitCommand(command)
self.parent.openPdfReader(pdfFile, page, True)
return ("", "", {})
# _SAVEPDFCURRENTPAGE:::
def savePdfCurrentPage(self, page, source):
command = "ANYPDF:::{0}:::{1}".format(config.pdfTextPath, page)
self.parent.addHistoryRecord("study", command)
self.parent.displayMessage(config.thisTranslation["saved"])
return ("", "", {})
# EPUB:::
def openEpubReader(self, command, source):
if command.count(":::") == 0:
command += ":::1"
pdfFile, page = self.splitCommand(command)
if source == "http":
return self.parent.openEpubReader(pdfFile, page)
else:
self.parent.openEpubReader(pdfFile, page)
return ("", "", {})
# IMPORT:::
def importResources(self, command, source):
if not command:
command = "import"
self.parent.importModulesInFolder(command)
return ("", "", {})
# DOWNLOAD:::
def download(self, command, source):
if config.isDownloading:
self.parent.displayMessage(config.thisTranslation["previousDownloadIncomplete"])
return ("", config.thisTranslation["previousDownloadIncomplete"], {})
else:
action, filename = self.splitCommand(command)
action = action.lower()
if action.startswith("marvel") or action.startswith("hymn"):
if action == "marvelbible":
dataset = DatafileLocation.marvelBibles
elif action == "marvelcommentary":
dataset = DatafileLocation.marvelCommentaries
elif action == "marveldata":
dataset = DatafileLocation.marvelData
else:
self.parent.displayMessage("{0} {1}".format(action, config.thisTranslation["unknown"]))
return ("", "", {})
if filename in dataset.keys():
databaseInfo = dataset[filename]
if os.path.isfile(os.path.join(*databaseInfo[0])):
self.parent.displayMessage("{0} {1}".format(filename, config.thisTranslation["alreadyExists"]))
else:
# self.parent.downloader = Downloader(self.parent, databaseInfo, True)
# self.parent.downloader.show()
self.parent.displayMessage("{0} {1}".format(config.thisTranslation["Downloading"], filename))
self.parent.downloadFile(databaseInfo, False)
self.parent.reloadControlPanel(False)
else:
self.parent.displayMessage("{0} {1}".format(filename, config.thisTranslation["notFound"]))
elif action.startswith("github"):
if not ("Pygithub" in config.enabled):
return ("", "", {})
if action == "githubbible":
repo, directory, text, extension = GitHubRepoInfo.bibles
elif action == "githubcommentary":
repo, directory, text, extension = GitHubRepoInfo.commentaries
elif action == "githubbook":
repo, directory, text, extension = GitHubRepoInfo.books
elif action == "githubmap":
repo, directory, text, extension = GitHubRepoInfo.maps
elif action == "githubpdf":
repo, directory, text, extension = GitHubRepoInfo.pdf
elif action == "githubepub":
repo, directory, text, extension = GitHubRepoInfo.epub
else:
self.parent.displayMessage("{0} {1}".format(action, config.thisTranslation["unknown"]))
return ("", "", {})
from util.GithubUtil import GithubUtil
github = GithubUtil(repo)
repoData = github.getRepoData()
folder = os.path.join(config.marvelData, directory)
shortFilename = GithubUtil.getShortname(filename)
shortFilename += "." + extension
if os.path.isfile(os.path.join(folder, shortFilename)):
self.parent.displayMessage("{0} {1}".format(filename, config.thisTranslation["alreadyExists"]))
else:
file = os.path.join(folder, shortFilename+".zip")
if filename in repoData.keys():
github.downloadFile(file, repoData[filename])
elif shortFilename in repoData.keys():
github.downloadFile(file, repoData[shortFilename])
else:
self.parent.displayMessage(
"{0} {1}".format(filename, config.thisTranslation["notFound"]))
                        return ("", "", {})
with zipfile.ZipFile(file, 'r') as zipped:
zipped.extractall(folder)
os.remove(file)
self.parent.reloadControlPanel(False)
self.parent.displayMessage("{0} {1}".format(filename, config.thisTranslation["message_installed"]))
else:
self.parent.displayMessage("{0} {1}".format(action, config.thisTranslation["unknown"]))
return ("study", "Downloaded!", {})
def noAction(self, command, source):
if config.enableHttpServer:
return self.textText(config.mainText, source)
else:
return ("", "", {})
# _fixlinksincommentary:::
def fixLinksInCommentary(self, command, source):
commentary = Commentary(command)
if commentary.connection is None:
self.parent.displayMessage("{0} {1}".format(command, config.thisTranslation["notFound"]))
else:
commentary.fixLinksInCommentary()
self.parent.displayMessage(config.thisTranslation["message_done"])
return ("", "", {})
# DEVOTIONAL:::
def openDevotional(self, command, source):
if command.count(":::") == 1:
devotional, date = self.splitCommand(command)
else:
devotional = command
date = ""
self.parent.openDevotional(devotional, date)
return ("", "", {})
if __name__ == "__main__":
from Languages import Languages
config.thisTranslation = Languages.translation
config.parserStandarisation = 'NO'
config.standardAbbreviation = 'ENG'
config.marvelData = "/Users/otseng/dev/UniqueBible/marvelData/"
parser = TextCommandParser("")
command = "searchhighlight:::all:::mal"
parser.parser(command)
| [
"llama_index.SimpleDirectoryReader",
"llama_index.ServiceContext.from_defaults",
"llama_index.llms.OpenAI",
"llama_index.StorageContext.from_defaults",
"llama_index.load_index_from_storage",
"llama_index.GPTVectorStoreIndex.from_documents"
] | [((63135, 63172), 're.split', 're.split', (['"""[ ]*?:::[ ]*?"""', 'command', '(1)'], {}), "('[ ]*?:::[ ]*?', command, 1)\n", (63143, 63172), False, 'import os, re, webbrowser, platform, zipfile, subprocess, config\n'), ((65168, 65182), 'db.BiblesSqlite.BiblesSqlite', 'BiblesSqlite', ([], {}), '()\n', (65180, 65182), False, 'from db.BiblesSqlite import BiblesSqlite, Bible, ClauseData\n'), ((76142, 76156), 'db.BiblesSqlite.BiblesSqlite', 'BiblesSqlite', ([], {}), '()\n', (76154, 76156), False, 'from db.BiblesSqlite import BiblesSqlite, Bible, ClauseData\n'), ((76418, 76432), 'db.BiblesSqlite.BiblesSqlite', 'BiblesSqlite', ([], {}), '()\n', (76430, 76432), False, 'from db.BiblesSqlite import BiblesSqlite, Bible, ClauseData\n'), ((76612, 76653), 'os.path.join', 'os.path.join', (['config.marvelData', '"""bibles"""'], {}), "(config.marvelData, 'bibles')\n", (76624, 76653), False, 'import os, re, webbrowser, platform, zipfile, subprocess, config\n'), ((77229, 77240), 'db.BiblesSqlite.Bible', 'Bible', (['text'], {}), '(text)\n', (77234, 77240), False, 'from db.BiblesSqlite import BiblesSqlite, Bible, ClauseData\n'), ((94032, 94131), 'subprocess.Popen', 'subprocess.Popen', (['"""ffmpeg -version"""'], {'shell': '(True)', 'stdout': 'subprocess.PIPE', 'stderr': 'subprocess.PIPE'}), "('ffmpeg -version', shell=True, stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n", (94048, 94131), False, 'import os, re, webbrowser, platform, zipfile, subprocess, config\n'), ((102937, 102967), 'os.path.join', 'os.path.join', (['folder', 'filename'], {}), '(folder, filename)\n', (102949, 102967), False, 'import os, re, webbrowser, platform, zipfile, subprocess, config\n'), ((102979, 103004), 'os.path.isfile', 'os.path.isfile', (['audioFile'], {}), '(audioFile)\n', (102993, 103004), False, 'import os, re, webbrowser, platform, zipfile, subprocess, config\n'), ((104230, 104260), 'os.path.join', 'os.path.join', (['folder', 'filename'], {}), '(folder, filename)\n', (104242, 104260), False, 'import os, re, webbrowser, platform, zipfile, subprocess, config\n'), ((104272, 104297), 'os.path.isfile', 'os.path.isfile', (['audioFile'], {}), '(audioFile)\n', (104286, 104297), False, 'import os, re, webbrowser, platform, zipfile, subprocess, config\n'), ((105623, 105653), 'os.path.join', 'os.path.join', (['folder', 'filename'], {}), '(folder, filename)\n', (105635, 105653), False, 'import os, re, webbrowser, platform, zipfile, subprocess, config\n'), ((105665, 105690), 'os.path.isfile', 'os.path.isfile', (['audioFile'], {}), '(audioFile)\n', (105679, 105690), False, 'import os, re, webbrowser, platform, zipfile, subprocess, config\n'), ((113516, 113539), 'db.StatisticsWordsSqlite.StatisticsWordsSqlite', 'StatisticsWordsSqlite', ([], {}), '()\n', (113537, 113539), False, 'from db.StatisticsWordsSqlite import StatisticsWordsSqlite\n'), ((113648, 113683), 're.findall', 're.findall', (['""" ([GH][0-9]*?) """', 'text'], {}), "(' ([GH][0-9]*?) 
', text)\n", (113658, 113683), False, 'import os, re, webbrowser, platform, zipfile, subprocess, config\n'), ((123782, 123794), 'util.Translator.Translator', 'Translator', ([], {}), '()\n', (123792, 123794), False, 'from util.Translator import Translator\n'), ((146397, 146440), 'os.path.join', 'os.path.join', (['config.marvelData', '*fileitems'], {}), '(config.marvelData, *fileitems)\n', (146409, 146440), False, 'import os, re, webbrowser, platform, zipfile, subprocess, config\n'), ((148013, 148037), 'os.path.join', 'os.path.join', (['*fileitems'], {}), '(*fileitems)\n', (148025, 148037), False, 'import os, re, webbrowser, platform, zipfile, subprocess, config\n'), ((153160, 153178), 'db.BiblesSqlite.MorphologySqlite', 'MorphologySqlite', ([], {}), '()\n', (153176, 153178), False, 'from db.BiblesSqlite import MorphologySqlite\n'), ((153228, 153279), 're.sub', 're.sub', (['"""^[h0]+?([^h0])"""', '"""\\\\1"""', 'wordID'], {'flags': 're.M'}), "('^[h0]+?([^h0])', '\\\\1', wordID, flags=re.M)\n", (153234, 153279), False, 'import os, re, webbrowser, platform, zipfile, subprocess, config\n'), ((153925, 153955), 'os.path.isfile', 'os.path.isfile', (['commentaryFile'], {}), '(commentaryFile)\n', (153939, 153955), False, 'import os, re, webbrowser, platform, zipfile, subprocess, config\n'), ((160837, 160853), 'db.ToolsSqlite.Commentary', 'Commentary', (['text'], {}), '(text)\n', (160847, 160853), False, 'from db.ToolsSqlite import CrossReferenceSqlite, CollectionsSqlite, ImageSqlite, IndexesSqlite, EncyclopediaData, DictionaryData, ExlbData, SearchSqlite, Commentary, VerseData, WordData, BookData, Lexicon, LexiconData\n'), ((161032, 161042), 'db.ToolsSqlite.BookData', 'BookData', ([], {}), '()\n', (161040, 161042), False, 'from db.ToolsSqlite import CrossReferenceSqlite, CollectionsSqlite, ImageSqlite, IndexesSqlite, EncyclopediaData, DictionaryData, ExlbData, SearchSqlite, Commentary, VerseData, WordData, BookData, Lexicon, LexiconData\n'), ((163764, 163777), 'db.ToolsSqlite.ImageSqlite', 'ImageSqlite', ([], {}), '()\n', (163775, 163777), False, 'from db.ToolsSqlite import CrossReferenceSqlite, CollectionsSqlite, ImageSqlite, IndexesSqlite, EncyclopediaData, DictionaryData, ExlbData, SearchSqlite, Commentary, VerseData, WordData, BookData, Lexicon, LexiconData\n'), ((165946, 165999), 're.search', 're.search', (['"""^[0-9]+?\\\\.[0-9]+?\\\\.[0-9]+?$"""', 'reference'], {}), "('^[0-9]+?\\\\.[0-9]+?\\\\.[0-9]+?$', reference)\n", (165955, 165999), False, 'import os, re, webbrowser, platform, zipfile, subprocess, config\n'), ((186621, 186637), 'db.ToolsSqlite.Lexicon', 'Lexicon', (['lexicon'], {}), '(lexicon)\n', (186628, 186637), False, 'from db.ToolsSqlite import CrossReferenceSqlite, CollectionsSqlite, ImageSqlite, IndexesSqlite, EncyclopediaData, DictionaryData, ExlbData, SearchSqlite, Commentary, VerseData, WordData, BookData, Lexicon, LexiconData\n'), ((186764, 186778), 'db.ToolsSqlite.SearchSqlite', 'SearchSqlite', ([], {}), '()\n', (186776, 186778), False, 'from db.ToolsSqlite import CrossReferenceSqlite, CollectionsSqlite, ImageSqlite, IndexesSqlite, EncyclopediaData, DictionaryData, ExlbData, SearchSqlite, Commentary, VerseData, WordData, BookData, Lexicon, LexiconData\n'), ((187089, 187153), 're.search', 're.search', (['"""^(LXX1|LXX2|LXX1i|LXX2i|SBLGNT|SBLGNTl):::"""', 'command'], {}), "('^(LXX1|LXX2|LXX1i|LXX2i|SBLGNT|SBLGNTl):::', command)\n", (187098, 187153), False, 'import os, re, webbrowser, platform, zipfile, subprocess, config\n'), ((188702, 188720), 
'db.BiblesSqlite.MorphologySqlite', 'MorphologySqlite', ([], {}), '()\n', (188718, 188720), False, 'from db.BiblesSqlite import MorphologySqlite\n'), ((188985, 189003), 'db.BiblesSqlite.MorphologySqlite', 'MorphologySqlite', ([], {}), '()\n', (189001, 189003), False, 'from db.BiblesSqlite import MorphologySqlite\n'), ((190345, 190363), 'db.BiblesSqlite.MorphologySqlite', 'MorphologySqlite', ([], {}), '()\n', (190361, 190363), False, 'from db.BiblesSqlite import MorphologySqlite\n'), ((196355, 196370), 'db.ToolsSqlite.IndexesSqlite', 'IndexesSqlite', ([], {}), '()\n', (196368, 196370), False, 'from db.ToolsSqlite import CrossReferenceSqlite, CollectionsSqlite, ImageSqlite, IndexesSqlite, EncyclopediaData, DictionaryData, ExlbData, SearchSqlite, Commentary, VerseData, WordData, BookData, Lexicon, LexiconData\n'), ((197611, 197621), 'db.ToolsSqlite.BookData', 'BookData', ([], {}), '()\n', (197619, 197621), False, 'from db.ToolsSqlite import CrossReferenceSqlite, CollectionsSqlite, ImageSqlite, IndexesSqlite, EncyclopediaData, DictionaryData, ExlbData, SearchSqlite, Commentary, VerseData, WordData, BookData, Lexicon, LexiconData\n'), ((199518, 199528), 'db.ToolsSqlite.BookData', 'BookData', ([], {}), '()\n', (199526, 199528), False, 'from db.ToolsSqlite import CrossReferenceSqlite, CollectionsSqlite, ImageSqlite, IndexesSqlite, EncyclopediaData, DictionaryData, ExlbData, SearchSqlite, Commentary, VerseData, WordData, BookData, Lexicon, LexiconData\n'), ((201007, 201022), 'db.JournalSqlite.JournalSqlite', 'JournalSqlite', ([], {}), '()\n', (201020, 201022), False, 'from db.JournalSqlite import JournalSqlite\n'), ((201544, 201556), 'db.NoteSqlite.NoteSqlite', 'NoteSqlite', ([], {}), '()\n', (201554, 201556), False, 'from db.NoteSqlite import NoteSqlite\n'), ((202034, 202046), 'db.NoteSqlite.NoteSqlite', 'NoteSqlite', ([], {}), '()\n', (202044, 202046), False, 'from db.NoteSqlite import NoteSqlite\n'), ((202535, 202547), 'db.NoteSqlite.NoteSqlite', 'NoteSqlite', ([], {}), '()\n', (202545, 202547), False, 'from db.NoteSqlite import NoteSqlite\n'), ((203119, 203164), 'util.BibleVerseParser.BibleVerseParser', 'BibleVerseParser', (['config.parserStandarisation'], {}), '(config.parserStandarisation)\n', (203135, 203164), False, 'from util.BibleVerseParser import BibleVerseParser\n'), ((207419, 207460), 'tabulate.tabulate', 'tabulate', (['table', 'headers'], {'tablefmt': '"""html"""'}), "(table, headers, tablefmt='html')\n", (207427, 207460), False, 'from tabulate import tabulate\n'), ((211548, 211615), 'gmplot.GoogleMapPlotter', 'gmplot.GoogleMapPlotter', (['(33.877444)', '(34.234935)', '(6)'], {'map_type': '"""hybrid"""'}), "(33.877444, 34.234935, 6, map_type='hybrid')\n", (211571, 211615), False, 'import gmplot\n'), ((215329, 215343), 'db.BiblesSqlite.BiblesSqlite', 'BiblesSqlite', ([], {}), '()\n', (215341, 215343), False, 'from db.BiblesSqlite import BiblesSqlite, Bible, ClauseData\n'), ((223943, 223954), 'db.Highlight.Highlight', 'Highlight', ([], {}), '()\n', (223952, 223954), False, 'from db.Highlight import Highlight\n'), ((232132, 232151), 'db.ToolsSqlite.Commentary', 'Commentary', (['command'], {}), '(command)\n', (232142, 232151), False, 'from db.ToolsSqlite import CrossReferenceSqlite, CollectionsSqlite, ImageSqlite, IndexesSqlite, EncyclopediaData, DictionaryData, ExlbData, SearchSqlite, Commentary, VerseData, WordData, BookData, Lexicon, LexiconData\n'), ((66763, 66848), 're.match', 're.match', (['"""^[Cc][Oo][Mm][Pp][Aa][Rr][Ee]:::(.*?:::)"""', "config.history['main'][-1]"], {}), 
"('^[Cc][Oo][Mm][Pp][Aa][Rr][Ee]:::(.*?:::)', config.history['main'][-1]\n )\n", (66771, 66848), False, 'import os, re, webbrowser, platform, zipfile, subprocess, config\n'), ((67301, 67390), 're.match', 're.match', (['"""^[Pp][Aa][Rr][Aa][Ll][Ll][Ee][Ll]:::(.*?:::)"""', "config.history['main'][-1]"], {}), "('^[Pp][Aa][Rr][Aa][Ll][Ll][Ee][Ll]:::(.*?:::)', config.history[\n 'main'][-1])\n", (67309, 67390), False, 'import os, re, webbrowser, platform, zipfile, subprocess, config\n'), ((67856, 67953), 're.match', 're.match', (['"""^[Ss][Ii][Dd][Ee][Bb][Yy][Ss][Ii][Dd][Ee]:::(.*?:::)"""', "config.history['main'][-1]"], {}), "('^[Ss][Ii][Dd][Ee][Bb][Yy][Ss][Ii][Dd][Ee]:::(.*?:::)', config.\n history['main'][-1])\n", (67864, 67953), False, 'import os, re, webbrowser, platform, zipfile, subprocess, config\n'), ((69452, 69493), 'os.path.join', 'os.path.join', (['config.marvelData', '"""bibles"""'], {}), "(config.marvelData, 'bibles')\n", (69464, 69493), False, 'import os, re, webbrowser, platform, zipfile, subprocess, config\n'), ((71987, 72046), 'util.PluginEventHandler.PluginEventHandler.handleEvent', 'PluginEventHandler.handleEvent', (['"""post_parse_bible"""', 'command'], {}), "('post_parse_bible', command)\n", (72017, 72046), False, 'from util.PluginEventHandler import PluginEventHandler\n'), ((72816, 72880), 're.search', 're.search', (['"""_chapters:::(MOB|MIB|MPB|MTB|MAB|OHGB|OHGBi)_"""', 'text'], {}), "('_chapters:::(MOB|MIB|MPB|MTB|MAB|OHGB|OHGBi)_', text)\n", (72825, 72880), False, 'import os, re, webbrowser, platform, zipfile, subprocess, config\n'), ((73290, 73331), 're.sub', 're.sub', (['"""(<pm>|</pm>|<n>|</n>)"""', '""""""', 'text'], {}), "('(<pm>|</pm>|<n>|</n>)', '', text)\n", (73296, 73331), False, 'import os, re, webbrowser, platform, zipfile, subprocess, config\n'), ((74133, 74171), 're.sub', 're.sub', (['"""<vid .*?>.*?</vid>"""', '""""""', 'text'], {}), "('<vid .*?>.*?</vid>', '', text)\n", (74139, 74171), False, 'import os, re, webbrowser, platform, zipfile, subprocess, config\n'), ((74237, 74316), 're.sub', 're.sub', (['"""<sup><ref onclick=\'bn\\\\([^\\\\(\\\\)]*?\\\\)\'>⊕</ref></sup>"""', '""""""', 'text'], {}), '("<sup><ref onclick=\'bn\\\\([^\\\\(\\\\)]*?\\\\)\'>⊕</ref></sup>", \'\', text)\n', (74243, 74316), False, 'import os, re, webbrowser, platform, zipfile, subprocess, config\n'), ((75633, 75814), 're.compile', 're.compile', (['"""(<sup><ref onclick=\'bn\\\\([^\\\\n\\\\(\\\\)]*?\\\\)\'>⊕</ref></sup>|<woj>⸃</woj>|</woj>|</i>|</ot>|</mbe>|</mbn>)(<sup><ref onclick=\'l[^\\\\r<>]*?>\\\\*</ref></sup>)"""'], {}), '(\n "(<sup><ref onclick=\'bn\\\\([^\\\\n\\\\(\\\\)]*?\\\\)\'>⊕</ref></sup>|<woj>⸃</woj>|</woj>|</i>|</ot>|</mbe>|</mbn>)(<sup><ref onclick=\'l[^\\\\r<>]*?>\\\\*</ref></sup>)"\n )\n', (75643, 75814), False, 'import os, re, webbrowser, platform, zipfile, subprocess, config\n'), ((75893, 75967), 're.compile', 're.compile', (['"""([^\\\\n<>]+?)<sup><ref (onclick=\'l[^\\\\r<>]*?>)\\\\*</ref></sup>"""'], {}), '("([^\\\\n<>]+?)<sup><ref (onclick=\'l[^\\\\r<>]*?>)\\\\*</ref></sup>")\n', (75903, 75967), False, 'import os, re, webbrowser, platform, zipfile, subprocess, config\n'), ((78259, 78282), 'util.WebtopUtil.WebtopUtil.run', 'WebtopUtil.run', (['command'], {}), '(command)\n', (78273, 78282), False, 'from util.WebtopUtil import WebtopUtil\n'), ((97072, 97086), 'prompt_toolkit.input.create_input', 'create_input', ([], {}), '()\n', (97084, 97086), False, 'from prompt_toolkit.input import create_input\n'), ((106785, 106808), 'os.path.isfile', 'os.path.isfile', 
(['command'], {}), '(command)\n', (106799, 106808), False, 'import os, re, webbrowser, platform, zipfile, subprocess, config\n'), ((117079, 117090), 'db.BiblesSqlite.Bible', 'Bible', (['text'], {}), '(text)\n', (117084, 117090), False, 'from db.BiblesSqlite import BiblesSqlite, Bible, ClauseData\n'), ((118644, 118655), 'db.BiblesSqlite.Bible', 'Bible', (['text'], {}), '(text)\n', (118649, 118655), False, 'from db.BiblesSqlite import BiblesSqlite, Bible, ClauseData\n'), ((119909, 119928), 'db.ToolsSqlite.Commentary', 'Commentary', (['command'], {}), '(command)\n', (119919, 119928), False, 'from db.ToolsSqlite import CrossReferenceSqlite, CollectionsSqlite, ImageSqlite, IndexesSqlite, EncyclopediaData, DictionaryData, ExlbData, SearchSqlite, Commentary, VerseData, WordData, BookData, Lexicon, LexiconData\n'), ((126309, 126320), 'Languages.Languages', 'Languages', ([], {}), '()\n', (126318, 126320), False, 'from Languages import Languages\n'), ((129166, 129180), 'db.BiblesSqlite.BiblesSqlite', 'BiblesSqlite', ([], {}), '()\n', (129178, 129180), False, 'from db.BiblesSqlite import BiblesSqlite, Bible, ClauseData\n'), ((130511, 130525), 'db.BiblesSqlite.BiblesSqlite', 'BiblesSqlite', ([], {}), '()\n', (130523, 130525), False, 'from db.BiblesSqlite import BiblesSqlite, Bible, ClauseData\n'), ((131662, 131676), 'db.BiblesSqlite.BiblesSqlite', 'BiblesSqlite', ([], {}), '()\n', (131674, 131676), False, 'from db.BiblesSqlite import BiblesSqlite, Bible, ClauseData\n'), ((139086, 139097), 'db.BiblesSqlite.Bible', 'Bible', (['text'], {}), '(text)\n', (139091, 139097), False, 'from db.BiblesSqlite import BiblesSqlite, Bible, ClauseData\n'), ((145354, 145369), 'db.JournalSqlite.JournalSqlite', 'JournalSqlite', ([], {}), '()\n', (145367, 145369), False, 'from db.JournalSqlite import JournalSqlite\n'), ((150165, 150189), 'os.path.join', 'os.path.join', (['*pathItems'], {}), '(*pathItems)\n', (150177, 150189), False, 'import os, re, webbrowser, platform, zipfile, subprocess, config\n'), ((150733, 150771), 'util.LexicalData.LexicalData.getLexicalData', 'LexicalData.getLexicalData', (['item', '(True)'], {}), '(item, True)\n', (150759, 150771), False, 'from util.LexicalData import LexicalData\n'), ((151054, 151072), 'db.BiblesSqlite.MorphologySqlite', 'MorphologySqlite', ([], {}), '()\n', (151070, 151072), False, 'from db.BiblesSqlite import MorphologySqlite\n'), ((153496, 153510), 'db.BiblesSqlite.BiblesSqlite', 'BiblesSqlite', ([], {}), '()\n', (153508, 153510), False, 'from db.BiblesSqlite import BiblesSqlite, Bible, ClauseData\n'), ((162108, 162144), 'config.mainWindow.getclipboardtext', 'config.mainWindow.getclipboardtext', ([], {}), '()\n', (162142, 162144), False, 'import os, re, webbrowser, platform, zipfile, subprocess, config\n'), ((162875, 162916), 're.sub', 're.sub', (['""" #"""', '"""<br>#"""', 'content'], {}), "(' #', '<br>#', content)\n", (162881, 162916), False, 'import os, re, webbrowser, platform, zipfile, subprocess, config\n'), ((163147, 163195), 'os.path.join', 'os.path.join', (['"""htmlResources"""', '"""images"""', 'command'], {}), "('htmlResources', 'images', command)\n", (163159, 163195), False, 'import os, re, webbrowser, platform, zipfile, subprocess, config\n'), ((167012, 167027), 'db.ToolsSqlite.IndexesSqlite', 'IndexesSqlite', ([], {}), '()\n', (167025, 167027), False, 'from db.ToolsSqlite import CrossReferenceSqlite, CollectionsSqlite, ImageSqlite, IndexesSqlite, EncyclopediaData, DictionaryData, ExlbData, SearchSqlite, Commentary, VerseData, WordData, BookData, Lexicon, 
LexiconData\n'), ((170106, 170227), 'openai.ChatCompletion.create', 'openai.ChatCompletion.create', ([], {'model': 'config.chatGPTApiModel', 'messages': 'messages', 'n': '(1)', 'temperature': '(0.0)', 'max_tokens': '(2048)'}), '(model=config.chatGPTApiModel, messages=\n messages, n=1, temperature=0.0, max_tokens=2048)\n', (170134, 170227), False, 'import openai, traceback, shutil\n'), ((170452, 170504), 're.sub', 're.sub', (['"""^SELECT . FROM Verses WHERE """', '""""""', 'sqlQuery'], {}), "('^SELECT . FROM Verses WHERE ', '', sqlQuery)\n", (170458, 170504), False, 'import os, re, webbrowser, platform, zipfile, subprocess, config\n'), ((172002, 172049), 'os.path.join', 'os.path.join', (['"""llama_index"""', 'f"""{text}_md_index"""'], {}), "('llama_index', f'{text}_md_index')\n", (172014, 172049), False, 'import os, re, webbrowser, platform, zipfile, subprocess, config\n'), ((172074, 172100), 'os.path.join', 'os.path.join', (['"""temp"""', 'text'], {}), "('temp', text)\n", (172086, 172100), False, 'import os, re, webbrowser, platform, zipfile, subprocess, config\n'), ((173410, 173463), 'llama_index.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {'persist_dir': 'persist_dir'}), '(persist_dir=persist_dir)\n', (173438, 173463), False, 'from llama_index import SimpleDirectoryReader, ServiceContext, GPTVectorStoreIndex, StorageContext, load_index_from_storage\n'), ((173484, 173524), 'llama_index.load_index_from_storage', 'load_index_from_storage', (['storage_context'], {}), '(storage_context)\n', (173507, 173524), False, 'from llama_index import SimpleDirectoryReader, ServiceContext, GPTVectorStoreIndex, StorageContext, load_index_from_storage\n'), ((174710, 174749), 're.match', 're.match', (['"""^[EHG][0-9]+?$"""', 'searchEntry'], {}), "('^[EHG][0-9]+?$', searchEntry)\n", (174718, 174749), False, 'import os, re, webbrowser, platform, zipfile, subprocess, config\n'), ((178458, 178472), 'db.BiblesSqlite.BiblesSqlite', 'BiblesSqlite', ([], {}), '()\n', (178470, 178472), False, 'from db.BiblesSqlite import BiblesSqlite, Bible, ClauseData\n'), ((178951, 178962), 'db.Highlight.Highlight', 'Highlight', ([], {}), '()\n', (178960, 178962), False, 'from db.Highlight import Highlight\n'), ((179501, 179519), 'db.BiblesSqlite.MorphologySqlite', 'MorphologySqlite', ([], {}), '()\n', (179517, 179519), False, 'from db.BiblesSqlite import MorphologySqlite\n'), ((181753, 181768), 'db.ToolsSqlite.Lexicon', 'Lexicon', (['module'], {}), '(module)\n', (181760, 181768), False, 'from db.ToolsSqlite import CrossReferenceSqlite, CollectionsSqlite, ImageSqlite, IndexesSqlite, EncyclopediaData, DictionaryData, ExlbData, SearchSqlite, Commentary, VerseData, WordData, BookData, Lexicon, LexiconData\n'), ((181884, 181902), 'db.BiblesSqlite.MorphologySqlite', 'MorphologySqlite', ([], {}), '()\n', (181900, 181902), False, 'from db.BiblesSqlite import MorphologySqlite\n'), ((185560, 185575), 'db.ToolsSqlite.Lexicon', 'Lexicon', (['module'], {}), '(module)\n', (185567, 185575), False, 'from db.ToolsSqlite import CrossReferenceSqlite, CollectionsSqlite, ImageSqlite, IndexesSqlite, EncyclopediaData, DictionaryData, ExlbData, SearchSqlite, Commentary, VerseData, WordData, BookData, Lexicon, LexiconData\n'), ((187237, 187250), 'db.BiblesSqlite.Bible', 'Bible', (['module'], {}), '(module)\n', (187242, 187250), False, 'from db.BiblesSqlite import BiblesSqlite, Bible, ClauseData\n'), ((191312, 191368), 'os.path.join', 'os.path.join', (['config.marvelData', '"""bibles"""', '"""OHGBi.bible"""'], {}), 
"(config.marvelData, 'bibles', 'OHGBi.bible')\n", (191324, 191368), False, 'import os, re, webbrowser, platform, zipfile, subprocess, config\n'), ((191460, 191474), 'db.BiblesSqlite.Bible', 'Bible', (['"""OHGBi"""'], {}), "('OHGBi')\n", (191465, 191474), False, 'from db.BiblesSqlite import BiblesSqlite, Bible, ClauseData\n'), ((195878, 195890), 'db.BiblesSqlite.ClauseData', 'ClauseData', ([], {}), '()\n', (195888, 195890), False, 'from db.BiblesSqlite import BiblesSqlite, Bible, ClauseData\n'), ((196606, 196622), 'db.ToolsSqlite.DictionaryData', 'DictionaryData', ([], {}), '()\n', (196620, 196622), False, 'from db.ToolsSqlite import CrossReferenceSqlite, CollectionsSqlite, ImageSqlite, IndexesSqlite, EncyclopediaData, DictionaryData, ExlbData, SearchSqlite, Commentary, VerseData, WordData, BookData, Lexicon, LexiconData\n'), ((197062, 197077), 'db.ToolsSqlite.IndexesSqlite', 'IndexesSqlite', ([], {}), '()\n', (197075, 197077), False, 'from db.ToolsSqlite import CrossReferenceSqlite, CollectionsSqlite, ImageSqlite, IndexesSqlite, EncyclopediaData, DictionaryData, ExlbData, SearchSqlite, Commentary, VerseData, WordData, BookData, Lexicon, LexiconData\n'), ((207267, 207289), 're.sub', 're.sub', (["'\\r'", '""""""', 'text'], {}), "('\\r', '', text)\n", (207273, 207289), False, 'import os, re, webbrowser, platform, zipfile, subprocess, config\n'), ((207730, 207745), 'db.ToolsSqlite.IndexesSqlite', 'IndexesSqlite', ([], {}), '()\n', (207743, 207745), False, 'from db.ToolsSqlite import CrossReferenceSqlite, CollectionsSqlite, ImageSqlite, IndexesSqlite, EncyclopediaData, DictionaryData, ExlbData, SearchSqlite, Commentary, VerseData, WordData, BookData, Lexicon, LexiconData\n'), ((215068, 215113), 'glob.glob', 'glob.glob', (["(config.marvelData + '/xref/*.xref')"], {}), "(config.marvelData + '/xref/*.xref')\n", (215077, 215113), False, 'import glob, pprint, traceback, pydoc, threading, asyncio\n'), ((215427, 215453), 'db.ToolsSqlite.CrossReferenceSqlite', 'CrossReferenceSqlite', (['file'], {}), '(file)\n', (215447, 215453), False, 'from db.ToolsSqlite import CrossReferenceSqlite, CollectionsSqlite, ImageSqlite, IndexesSqlite, EncyclopediaData, DictionaryData, ExlbData, SearchSqlite, Commentary, VerseData, WordData, BookData, Lexicon, LexiconData\n'), ((216418, 216432), 'db.BiblesSqlite.BiblesSqlite', 'BiblesSqlite', ([], {}), '()\n', (216430, 216432), False, 'from db.BiblesSqlite import BiblesSqlite, Bible, ClauseData\n'), ((216468, 216490), 'db.ToolsSqlite.CrossReferenceSqlite', 'CrossReferenceSqlite', ([], {}), '()\n', (216488, 216490), False, 'from db.ToolsSqlite import CrossReferenceSqlite, CollectionsSqlite, ImageSqlite, IndexesSqlite, EncyclopediaData, DictionaryData, ExlbData, SearchSqlite, Commentary, VerseData, WordData, BookData, Lexicon, LexiconData\n'), ((220683, 220728), 'util.BibleVerseParser.BibleVerseParser', 'BibleVerseParser', (['config.parserStandarisation'], {}), '(config.parserStandarisation)\n', (220699, 220728), False, 'from util.BibleVerseParser import BibleVerseParser\n'), ((220757, 220772), 'db.ToolsSqlite.IndexesSqlite', 'IndexesSqlite', ([], {}), '()\n', (220770, 220772), False, 'from db.ToolsSqlite import CrossReferenceSqlite, CollectionsSqlite, ImageSqlite, IndexesSqlite, EncyclopediaData, DictionaryData, ExlbData, SearchSqlite, Commentary, VerseData, WordData, BookData, Lexicon, LexiconData\n'), ((221417, 221462), 'util.BibleVerseParser.BibleVerseParser', 'BibleVerseParser', (['config.parserStandarisation'], {}), '(config.parserStandarisation)\n', (221433, 221462), 
False, 'from util.BibleVerseParser import BibleVerseParser\n'), ((221491, 221506), 'db.ToolsSqlite.IndexesSqlite', 'IndexesSqlite', ([], {}), '()\n', (221504, 221506), False, 'from db.ToolsSqlite import CrossReferenceSqlite, CollectionsSqlite, ImageSqlite, IndexesSqlite, EncyclopediaData, DictionaryData, ExlbData, SearchSqlite, Commentary, VerseData, WordData, BookData, Lexicon, LexiconData\n'), ((223626, 223654), 'util.ThirdParty.ThirdPartyDictionary', 'ThirdPartyDictionary', (['module'], {}), '(module)\n', (223646, 223654), False, 'from util.ThirdParty import ThirdPartyDictionary\n'), ((52118, 52157), 'functools.partial', 'partial', (['self.textSearchSingleBook', 'key'], {}), '(self.textSearchSingleBook, key)\n', (52125, 52157), False, 'from functools import partial\n'), ((62168, 62192), 'os.path.join', 'os.path.join', (['*fileItems'], {}), '(*fileItems)\n', (62180, 62192), False, 'import os, re, webbrowser, platform, zipfile, subprocess, config\n'), ((62605, 62627), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (62625, 62627), False, 'import openai, traceback, shutil\n'), ((65823, 65868), 'util.BibleVerseParser.BibleVerseParser', 'BibleVerseParser', (['config.parserStandarisation'], {}), '(config.parserStandarisation)\n', (65839, 65868), False, 'from util.BibleVerseParser import BibleVerseParser\n'), ((65962, 66007), 'util.BibleVerseParser.BibleVerseParser', 'BibleVerseParser', (['config.parserStandarisation'], {}), '(config.parserStandarisation)\n', (65978, 66007), False, 'from util.BibleVerseParser import BibleVerseParser\n'), ((66099, 66144), 'util.BibleVerseParser.BibleVerseParser', 'BibleVerseParser', (['config.parserStandarisation'], {}), '(config.parserStandarisation)\n', (66115, 66144), False, 'from util.BibleVerseParser import BibleVerseParser\n'), ((68982, 69017), 're.sub', 're.sub', (['""" \\\\d+:?\\\\d?$"""', '""""""', 'command'], {}), "(' \\\\d+:?\\\\d?$', '', command)\n", (68988, 69017), False, 'import os, re, webbrowser, platform, zipfile, subprocess, config\n'), ((73869, 74017), 're.sub', 're.sub', (['"""([ ]*<ref onclick="wa[gh])(\\\\([0-9]+?,[0-9]+?\\\\)">[^<>]+?</ref>)(.*?</wform>.*?<wlex>.*?</wlex></ref>)"""', '"""\\\\1\\\\2\\\\3\\\\1l\\\\2"""', 'text'], {}), '(\n \'([ ]*<ref onclick="wa[gh])(\\\\([0-9]+?,[0-9]+?\\\\)">[^<>]+?</ref>)(.*?</wform>.*?<wlex>.*?</wlex></ref>)\'\n , \'\\\\1\\\\2\\\\3\\\\1l\\\\2\', text)\n', (73875, 74017), False, 'import os, re, webbrowser, platform, zipfile, subprocess, config\n'), ((75587, 75616), 're.sub', 're.sub', (['search', 'replace', 'text'], {}), '(search, replace, text)\n', (75593, 75616), False, 'import os, re, webbrowser, platform, zipfile, subprocess, config\n'), ((76697, 76730), 'os.listdir', 'os.listdir', (['formattedBiblesFolder'], {}), '(formattedBiblesFolder)\n', (76707, 76730), False, 'import os, re, webbrowser, platform, zipfile, subprocess, config\n'), ((78528, 78618), 'subprocess.Popen', 'subprocess.Popen', (['command'], {'shell': '(True)', 'stdout': 'subprocess.PIPE', 'stderr': 'subprocess.PIPE'}), '(command, shell=True, stdout=subprocess.PIPE, stderr=\n subprocess.PIPE)\n', (78544, 78618), False, 'import os, re, webbrowser, platform, zipfile, subprocess, config\n'), ((79372, 79440), 'subprocess.Popen', 'subprocess.Popen', (['"""which espeak"""'], {'shell': '(True)', 'stdout': 'subprocess.PIPE'}), "('which espeak', shell=True, stdout=subprocess.PIPE)\n", (79388, 79440), False, 'import os, re, webbrowser, platform, zipfile, subprocess, config\n'), ((80575, 80617), 
'config.mainWindow.createAudioPlayingFile', 'config.mainWindow.createAudioPlayingFile', ([], {}), '()\n', (80615, 80617), False, 'import os, re, webbrowser, platform, zipfile, subprocess, config\n'), ((80641, 80673), 're.sub', 're.sub', (['"""(\\\\. |。)"""', '"""\\\\1*"""', 'text'], {}), "('(\\\\. |。)', '\\\\1*', text)\n", (80647, 80673), False, 'import os, re, webbrowser, platform, zipfile, subprocess, config\n'), ((80973, 81015), 'config.mainWindow.removeAudioPlayingFile', 'config.mainWindow.removeAudioPlayingFile', ([], {}), '()\n', (81013, 81015), False, 'import os, re, webbrowser, platform, zipfile, subprocess, config\n'), ((82053, 82078), 'os.path.isfile', 'os.path.isfile', (['audioFile'], {}), '(audioFile)\n', (82067, 82078), False, 'import os, re, webbrowser, platform, zipfile, subprocess, config\n'), ((84544, 84579), 're.sub', 're.sub', (['"""^\\\\[.*?\\\\] """', '""""""', 'language'], {}), "('^\\\\[.*?\\\\] ', '', language)\n", (84550, 84579), False, 'import os, re, webbrowser, platform, zipfile, subprocess, config\n'), ((84834, 84955), 'subprocess.Popen', 'subprocess.Popen', (['[command]'], {'shell': '(True)', 'preexec_fn': 'os.setpgrp', 'stdout': 'subprocess.DEVNULL', 'stderr': 'subprocess.DEVNULL'}), '([command], shell=True, preexec_fn=os.setpgrp, stdout=\n subprocess.DEVNULL, stderr=subprocess.DEVNULL)\n', (84850, 84955), False, 'import os, re, webbrowser, platform, zipfile, subprocess, config\n'), ((87097, 87136), 'util.WebtopUtil.WebtopUtil.isPackageInstalled', 'WebtopUtil.isPackageInstalled', (['"""espeak"""'], {}), "('espeak')\n", (87126, 87136), False, 'from util.WebtopUtil import WebtopUtil\n'), ((88313, 88345), 'PyQt5.QtTextToSpeech.QTextToSpeech.availableEngines', 'QTextToSpeech.availableEngines', ([], {}), '()\n', (88343, 88345), False, 'from PyQt5.QtTextToSpeech import QTextToSpeech\n'), ((90836, 90874), 'util.WebtopUtil.WebtopUtil.isPackageInstalled', 'WebtopUtil.isPackageInstalled', (['"""pkill"""'], {}), "('pkill')\n", (90865, 90874), False, 'from util.WebtopUtil import WebtopUtil\n'), ((94545, 94575), 're.search', 're.search', (['"""^[0-9]+? """', 'option'], {}), "('^[0-9]+? 
', option)\n", (94554, 94575), False, 'import os, re, webbrowser, platform, zipfile, subprocess, config\n'), ((96912, 96937), 'webbrowser.open', 'webbrowser.open', (['wikiPage'], {}), '(wikiPage)\n', (96927, 96937), False, 'import os, re, webbrowser, platform, zipfile, subprocess, config\n'), ((106977, 107028), 'config.mainWindow.addToAudioPlayList', 'config.mainWindow.addToAudioPlayList', (['command', '(True)'], {}), '(command, True)\n', (107013, 107028), False, 'import os, re, webbrowser, platform, zipfile, subprocess, config\n'), ((107268, 107296), 'util.WebtopUtil.WebtopUtil.openFile', 'WebtopUtil.openFile', (['command'], {}), '(command)\n', (107287, 107296), False, 'from util.WebtopUtil import WebtopUtil\n'), ((112598, 112634), 're.match', 're.match', (['"""^[EGH][0-9]+?$"""', 'strongNo'], {}), "('^[EGH][0-9]+?$', strongNo)\n", (112606, 112634), False, 'import os, re, webbrowser, platform, zipfile, subprocess, config\n'), ((121783, 121795), 'db.BiblesSqlite.Bible', 'Bible', (['"""KJV"""'], {}), "('KJV')\n", (121788, 121795), False, 'from db.BiblesSqlite import BiblesSqlite, Bible, ClauseData\n'), ((122939, 122970), 'config.mainWindow.copy', 'config.mainWindow.copy', (['command'], {}), '(command)\n', (122961, 122970), False, 'import os, re, webbrowser, platform, zipfile, subprocess, config\n'), ((128085, 128103), 'db.BiblesSqlite.MorphologySqlite', 'MorphologySqlite', ([], {}), '()\n', (128101, 128103), False, 'from db.BiblesSqlite import MorphologySqlite\n'), ((128318, 128336), 'db.BiblesSqlite.MorphologySqlite', 'MorphologySqlite', ([], {}), '()\n', (128334, 128336), False, 'from db.BiblesSqlite import MorphologySqlite\n'), ((134976, 135021), 'util.BibleVerseParser.BibleVerseParser', 'BibleVerseParser', (['config.parserStandarisation'], {}), '(config.parserStandarisation)\n', (134992, 135021), False, 'from util.BibleVerseParser import BibleVerseParser\n'), ((135053, 135067), 'db.BiblesSqlite.BiblesSqlite', 'BiblesSqlite', ([], {}), '()\n', (135065, 135067), False, 'from db.BiblesSqlite import BiblesSqlite, Bible, ClauseData\n'), ((136680, 136725), 'util.BibleVerseParser.BibleVerseParser', 'BibleVerseParser', (['config.parserStandarisation'], {}), '(config.parserStandarisation)\n', (136696, 136725), False, 'from util.BibleVerseParser import BibleVerseParser\n'), ((136757, 136771), 'db.BiblesSqlite.BiblesSqlite', 'BiblesSqlite', ([], {}), '()\n', (136769, 136771), False, 'from db.BiblesSqlite import BiblesSqlite, Bible, ClauseData\n'), ((136793, 136812), 'db.ToolsSqlite.CollectionsSqlite', 'CollectionsSqlite', ([], {}), '()\n', (136810, 136812), False, 'from db.ToolsSqlite import CrossReferenceSqlite, CollectionsSqlite, ImageSqlite, IndexesSqlite, EncyclopediaData, DictionaryData, ExlbData, SearchSqlite, Commentary, VerseData, WordData, BookData, Lexicon, LexiconData\n'), ((138463, 138508), 'util.BibleVerseParser.BibleVerseParser', 'BibleVerseParser', (['config.parserStandarisation'], {}), '(config.parserStandarisation)\n', (138479, 138508), False, 'from util.BibleVerseParser import BibleVerseParser\n'), ((138540, 138554), 'db.BiblesSqlite.BiblesSqlite', 'BiblesSqlite', ([], {}), '()\n', (138552, 138554), False, 'from db.BiblesSqlite import BiblesSqlite, Bible, ClauseData\n'), ((138576, 138595), 'db.ToolsSqlite.CollectionsSqlite', 'CollectionsSqlite', ([], {}), '()\n', (138593, 138595), False, 'from db.ToolsSqlite import CrossReferenceSqlite, CollectionsSqlite, ImageSqlite, IndexesSqlite, EncyclopediaData, DictionaryData, ExlbData, SearchSqlite, Commentary, VerseData, WordData, 
BookData, Lexicon, LexiconData\n'), ((142604, 142659), 'config.mainWindow.openNoteEditor', 'config.mainWindow.openNoteEditor', (['"""book"""'], {'b': 'b', 'c': 'c', 'v': 'v'}), "('book', b=b, c=c, v=v)\n", (142636, 142659), False, 'import os, re, webbrowser, platform, zipfile, subprocess, config\n'), ((143579, 143637), 'config.mainWindow.openNoteEditor', 'config.mainWindow.openNoteEditor', (['"""chapter"""'], {'b': 'b', 'c': 'c', 'v': 'v'}), "('chapter', b=b, c=c, v=v)\n", (143611, 143637), False, 'import os, re, webbrowser, platform, zipfile, subprocess, config\n'), ((144552, 144608), 'config.mainWindow.openNoteEditor', 'config.mainWindow.openNoteEditor', (['"""verse"""'], {'b': 'b', 'c': 'c', 'v': 'v'}), "('verse', b=b, c=c, v=v)\n", (144584, 144608), False, 'import os, re, webbrowser, platform, zipfile, subprocess, config\n'), ((145243, 145255), 'datetime.date.today', 'date.today', ([], {}), '()\n', (145253, 145255), False, 'from datetime import date\n'), ((145739, 145751), 'datetime.date.today', 'date.today', ([], {}), '()\n', (145749, 145751), False, 'from datetime import date\n'), ((145883, 145959), 'config.mainWindow.openNoteEditor', 'config.mainWindow.openNoteEditor', (['"""journal"""'], {'year': 'year', 'month': 'month', 'day': 'day'}), "('journal', year=year, month=month, day=day)\n", (145915, 145959), False, 'import os, re, webbrowser, platform, zipfile, subprocess, config\n'), ((146736, 146760), 'os.path.isfile', 'os.path.isfile', (['fullPath'], {}), '(fullPath)\n', (146750, 146760), False, 'import os, re, webbrowser, platform, zipfile, subprocess, config\n'), ((148788, 148836), 'os.path.join', 'os.path.join', (['config.marvelData', '"""docx"""', 'command'], {}), "(config.marvelData, 'docx', command)\n", (148800, 148836), False, 'import os, re, webbrowser, platform, zipfile, subprocess, config\n'), ((152594, 152608), 'db.BiblesSqlite.BiblesSqlite', 'BiblesSqlite', ([], {}), '()\n', (152606, 152608), False, 'from db.BiblesSqlite import BiblesSqlite, Bible, ClauseData\n'), ((154146, 154165), 'db.ToolsSqlite.Commentary', 'Commentary', (['command'], {}), '(command)\n', (154156, 154165), False, 'from db.ToolsSqlite import CrossReferenceSqlite, CollectionsSqlite, ImageSqlite, IndexesSqlite, EncyclopediaData, DictionaryData, ExlbData, SearchSqlite, Commentary, VerseData, WordData, BookData, Lexicon, LexiconData\n'), ((156936, 156985), 'config.verseNoDoubleClickAction.startswith', 'config.verseNoDoubleClickAction.startswith', (['"""_cp"""'], {}), "('_cp')\n", (156978, 156985), False, 'import os, re, webbrowser, platform, zipfile, subprocess, config\n'), ((157508, 157557), 'config.verseNoDoubleClickAction.startswith', 'config.verseNoDoubleClickAction.startswith', (['"""_cp"""'], {}), "('_cp')\n", (157550, 157557), False, 'import os, re, webbrowser, platform, zipfile, subprocess, config\n'), ((159097, 159136), 'config.verseNoSingleClickAction.upper', 'config.verseNoSingleClickAction.upper', ([], {}), '()\n', (159134, 159136), False, 'import os, re, webbrowser, platform, zipfile, subprocess, config\n'), ((159140, 159165), 'config.syncAction.upper', 'config.syncAction.upper', ([], {}), '()\n', (159163, 159165), False, 'import os, re, webbrowser, platform, zipfile, subprocess, config\n'), ((159468, 159517), 'config.verseNoSingleClickAction.startswith', 'config.verseNoSingleClickAction.startswith', (['"""_cp"""'], {}), "('_cp')\n", (159510, 159517), False, 'import os, re, webbrowser, platform, zipfile, subprocess, config\n'), ((162275, 162292), 'pyperclip.paste', 'pyperclip.paste', ([], {}), 
'()\n', (162290, 162292), False, 'import pyperclip\n'), ((163259, 163296), 'os.system', 'os.system', (['f"""termux-share {filepath}"""'], {}), "(f'termux-share {filepath}')\n", (163268, 163296), False, 'import os, re, webbrowser, platform, zipfile, subprocess, config\n'), ((163331, 163369), 'os.system', 'os.system', (['f"""{config.open} {filepath}"""'], {}), "(f'{config.open} {filepath}')\n", (163340, 163369), False, 'import os, re, webbrowser, platform, zipfile, subprocess, config\n'), ((165228, 165246), 'db.ToolsSqlite.Commentary', 'Commentary', (['module'], {}), '(module)\n', (165238, 165246), False, 'from db.ToolsSqlite import CrossReferenceSqlite, CollectionsSqlite, ImageSqlite, IndexesSqlite, EncyclopediaData, DictionaryData, ExlbData, SearchSqlite, Commentary, VerseData, WordData, BookData, Lexicon, LexiconData\n'), ((166303, 166321), 'db.ToolsSqlite.Commentary', 'Commentary', (['module'], {}), '(module)\n', (166313, 166321), False, 'from db.ToolsSqlite import CrossReferenceSqlite, CollectionsSqlite, ImageSqlite, IndexesSqlite, EncyclopediaData, DictionaryData, ExlbData, SearchSqlite, Commentary, VerseData, WordData, BookData, Lexicon, LexiconData\n'), ((171103, 171150), 'os.system', 'os.system', (['"""pip3 install --upgrade llama_index"""'], {}), "('pip3 install --upgrade llama_index')\n", (171112, 171150), False, 'import os, re, webbrowser, platform, zipfile, subprocess, config\n'), ((172153, 172177), 'os.path.isdir', 'os.path.isdir', (['bible_dir'], {}), '(bible_dir)\n', (172166, 172177), False, 'import os, re, webbrowser, platform, zipfile, subprocess, config\n'), ((172290, 172316), 'os.path.isdir', 'os.path.isdir', (['persist_dir'], {}), '(persist_dir)\n', (172303, 172316), False, 'import os, re, webbrowser, platform, zipfile, subprocess, config\n'), ((172776, 172898), 'llama_index.llms.OpenAI', 'OpenAI', ([], {'temperature': 'config.chatGPTApiTemperature', 'model': 'config.chatGPTApiModel', 'max_tokens': 'config.chatGPTApiMaxTokens'}), '(temperature=config.chatGPTApiTemperature, model=config.\n chatGPTApiModel, max_tokens=config.chatGPTApiMaxTokens)\n', (172782, 172898), False, 'from llama_index.llms import OpenAI\n'), ((172928, 172965), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'llm': 'llm'}), '(llm=llm)\n', (172956, 172965), False, 'from llama_index import SimpleDirectoryReader, ServiceContext, GPTVectorStoreIndex, StorageContext, load_index_from_storage\n'), ((173102, 173180), 'llama_index.GPTVectorStoreIndex.from_documents', 'GPTVectorStoreIndex.from_documents', (['documents'], {'service_context': 'service_context'}), '(documents, service_context=service_context)\n', (173136, 173180), False, 'from llama_index import SimpleDirectoryReader, ServiceContext, GPTVectorStoreIndex, StorageContext, load_index_from_storage\n'), ((174944, 174958), 'db.BiblesSqlite.BiblesSqlite', 'BiblesSqlite', ([], {}), '()\n', (174956, 174958), False, 'from db.BiblesSqlite import BiblesSqlite, Bible, ClauseData\n'), ((179691, 179701), 'db.ToolsSqlite.WordData', 'WordData', ([], {}), '()\n', (179699, 179701), False, 'from db.ToolsSqlite import CrossReferenceSqlite, CollectionsSqlite, ImageSqlite, IndexesSqlite, EncyclopediaData, DictionaryData, ExlbData, SearchSqlite, Commentary, VerseData, WordData, BookData, Lexicon, LexiconData\n'), ((180995, 181008), 'db.ToolsSqlite.LexiconData', 'LexiconData', ([], {}), '()\n', (181006, 181008), False, 'from db.ToolsSqlite import CrossReferenceSqlite, CollectionsSqlite, ImageSqlite, IndexesSqlite, EncyclopediaData, 
DictionaryData, ExlbData, SearchSqlite, Commentary, VerseData, WordData, BookData, Lexicon, LexiconData\n'), ((182073, 182127), 'util.PluginEventHandler.PluginEventHandler.handleEvent', 'PluginEventHandler.handleEvent', (['"""lexicon_entry"""', 'entry'], {}), "('lexicon_entry', entry)\n", (182103, 182127), False, 'from util.PluginEventHandler import PluginEventHandler\n'), ((194826, 194836), 'db.ToolsSqlite.ExlbData', 'ExlbData', ([], {}), '()\n', (194834, 194836), False, 'from db.ToolsSqlite import CrossReferenceSqlite, CollectionsSqlite, ImageSqlite, IndexesSqlite, EncyclopediaData, DictionaryData, ExlbData, SearchSqlite, Commentary, VerseData, WordData, BookData, Lexicon, LexiconData\n'), ((197270, 197288), 'db.ToolsSqlite.EncyclopediaData', 'EncyclopediaData', ([], {}), '()\n', (197286, 197288), False, 'from db.ToolsSqlite import CrossReferenceSqlite, CollectionsSqlite, ImageSqlite, IndexesSqlite, EncyclopediaData, DictionaryData, ExlbData, SearchSqlite, Commentary, VerseData, WordData, BookData, Lexicon, LexiconData\n'), ((198188, 198208), 're.split', 're.split', (['"""#"""', 'entry'], {}), "('#', entry)\n", (198196, 198208), False, 'import os, re, webbrowser, platform, zipfile, subprocess, config\n'), ((199749, 199771), 'util.CatalogUtil.CatalogUtil.getBooks', 'CatalogUtil.getBooks', ([], {}), '()\n', (199769, 199771), False, 'from util.CatalogUtil import CatalogUtil\n'), ((205740, 205754), 'db.BiblesSqlite.BiblesSqlite', 'BiblesSqlite', ([], {}), '()\n', (205752, 205754), False, 'from db.BiblesSqlite import BiblesSqlite, Bible, ClauseData\n'), ((206846, 206870), 'os.path.isfile', 'os.path.isfile', (['filepath'], {}), '(filepath)\n', (206860, 206870), False, 'import os, re, webbrowser, platform, zipfile, subprocess, config\n'), ((207476, 207521), 'util.BibleVerseParser.BibleVerseParser', 'BibleVerseParser', (['config.parserStandarisation'], {}), '(config.parserStandarisation)\n', (207492, 207521), False, 'from util.BibleVerseParser import BibleVerseParser\n'), ((210212, 210250), 're.findall', 're.findall', (['searchPattern', 'location[0]'], {}), '(searchPattern, location[0])\n', (210222, 210250), False, 'import os, re, webbrowser, platform, zipfile, subprocess, config\n'), ((214523, 214559), 'config.mainWindow.isUrlAlive', 'config.mainWindow.isUrlAlive', (['server'], {}), '(server)\n', (214551, 214559), False, 'import os, re, webbrowser, platform, zipfile, subprocess, config\n'), ((214662, 214699), 'config.mainWindow.getCommand', 'config.mainWindow.getCommand', (['command'], {}), '(command)\n', (214690, 214699), False, 'import os, re, webbrowser, platform, zipfile, subprocess, config\n'), ((218398, 218412), 'db.BiblesSqlite.BiblesSqlite', 'BiblesSqlite', ([], {}), '()\n', (218410, 218412), False, 'from db.BiblesSqlite import BiblesSqlite, Bible, ClauseData\n'), ((218441, 218460), 'db.ToolsSqlite.VerseData', 'VerseData', (['filename'], {}), '(filename)\n', (218450, 218460), False, 'from db.ToolsSqlite import CrossReferenceSqlite, CollectionsSqlite, ImageSqlite, IndexesSqlite, EncyclopediaData, DictionaryData, ExlbData, SearchSqlite, Commentary, VerseData, WordData, BookData, Lexicon, LexiconData\n'), ((222613, 222641), 'util.ThirdParty.ThirdPartyDictionary', 'ThirdPartyDictionary', (['module'], {}), '(module)\n', (222633, 222641), False, 'from util.ThirdParty import ThirdPartyDictionary\n'), ((69541, 69574), 'os.listdir', 'os.listdir', (['formattedBiblesFolder'], {}), '(formattedBiblesFolder)\n', (69551, 69574), False, 'import os, re, webbrowser, platform, zipfile, subprocess, 
config\n'), ((70280, 70291), 'db.BiblesSqlite.Bible', 'Bible', (['text'], {}), '(text)\n', (70285, 70291), False, 'from db.BiblesSqlite import BiblesSqlite, Bible, ClauseData\n'), ((77598, 77620), 'db.BiblesSqlite.BiblesSqlite', 'BiblesSqlite', (['language'], {}), '(language)\n', (77610, 77620), False, 'from db.BiblesSqlite import BiblesSqlite, Bible, ClauseData\n'), ((80864, 80961), 'pydoc.pipepager', 'pydoc.pipepager', (['i'], {'cmd': 'f"""termux-tts-speak -l {language} -r {config.terminalTermuxttsSpeed}"""'}), "(i, cmd=\n f'termux-tts-speak -l {language} -r {config.terminalTermuxttsSpeed}')\n", (80879, 80961), False, 'import glob, pprint, traceback, pydoc, threading, asyncio\n'), ((84393, 84425), 'util.TextUtil.TextUtil.removeVowelAccent', 'TextUtil.removeVowelAccent', (['text'], {}), '(text)\n', (84419, 84425), False, 'from util.TextUtil import TextUtil\n'), ((87030, 87047), 'platform.system', 'platform.system', ([], {}), '()\n', (87045, 87047), False, 'import os, re, webbrowser, platform, zipfile, subprocess, config\n'), ((88417, 88446), 'PyQt5.QtTextToSpeech.QTextToSpeech', 'QTextToSpeech', (['engineNames[0]'], {}), '(engineNames[0])\n', (88430, 88446), False, 'from PyQt5.QtTextToSpeech import QTextToSpeech\n'), ((90896, 90921), 'os.system', 'os.system', (['"""pkill yt-dlp"""'], {}), "('pkill yt-dlp')\n", (90905, 90921), False, 'import os, re, webbrowser, platform, zipfile, subprocess, config\n'), ((98361, 98390), 'util.FileUtil.FileUtil.getMP3TextFile', 'FileUtil.getMP3TextFile', (['text'], {}), '(text)\n', (98384, 98390), False, 'from util.FileUtil import FileUtil\n'), ((100387, 100404), 'threading.Event', 'threading.Event', ([], {}), '()\n', (100402, 100404), False, 'import glob, pprint, traceback, pydoc, threading, asyncio\n'), ((107085, 107137), 'util.VlcUtil.VlcUtil.playMediaFile', 'VlcUtil.playMediaFile', (['command', 'config.vlcSpeed', 'gui'], {}), '(command, config.vlcSpeed, gui)\n', (107106, 107137), False, 'from util.VlcUtil import VlcUtil\n'), ((109941, 109963), 'db.BiblesSqlite.Bible', 'Bible', (['config.mainText'], {}), '(config.mainText)\n', (109946, 109963), False, 'from db.BiblesSqlite import BiblesSqlite, Bible, ClauseData\n'), ((115119, 115143), 'os.path.join', 'os.path.join', (['*fileItems'], {}), '(*fileItems)\n', (115131, 115143), False, 'import os, re, webbrowser, platform, zipfile, subprocess, config\n'), ((115915, 115939), 'os.path.join', 'os.path.join', (['*fileItems'], {}), '(*fileItems)\n', (115927, 115939), False, 'import os, re, webbrowser, platform, zipfile, subprocess, config\n'), ((117877, 117971), 'util.HtmlGeneratorUtil.HtmlGeneratorUtil.getBibleChapterTable', 'HtmlGeneratorUtil.getBibleChapterTable', (['books[bkNoStr][1]', 'abb', 'chapterList', 'commandPrefix'], {}), '(books[bkNoStr][1], abb, chapterList,\n commandPrefix)\n', (117915, 117971), False, 'from util.HtmlGeneratorUtil import HtmlGeneratorUtil\n'), ((120995, 121089), 'util.HtmlGeneratorUtil.HtmlGeneratorUtil.getBibleChapterTable', 'HtmlGeneratorUtil.getBibleChapterTable', (['books[bkNoStr][1]', 'abb', 'chapterList', 'commandPrefix'], {}), '(books[bkNoStr][1], abb, chapterList,\n commandPrefix)\n', (121033, 121089), False, 'from util.HtmlGeneratorUtil import HtmlGeneratorUtil\n'), ((123164, 123188), 'qtpy.QtWidgets.QApplication.clipboard', 'QApplication.clipboard', ([], {}), '()\n', (123186, 123188), False, 'from qtpy.QtWidgets import QApplication\n'), ((127596, 127620), 'os.path.join', 'os.path.join', (['*fileItems'], {}), '(*fileItems)\n', (127608, 127620), False, 'import os, re, 
webbrowser, platform, zipfile, subprocess, config\n'), ((146698, 146709), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (146707, 146709), False, 'import os, re, webbrowser, platform, zipfile, subprocess, config\n'), ((147148, 147172), 'os.path.isfile', 'os.path.isfile', (['filePath'], {}), '(filePath)\n', (147162, 147172), False, 'import os, re, webbrowser, platform, zipfile, subprocess, config\n'), ((148217, 148247), 'util.TextUtil.TextUtil.imageToText', 'TextUtil.imageToText', (['filePath'], {}), '(filePath)\n', (148237, 148247), False, 'from util.TextUtil import TextUtil\n'), ((148440, 148464), 'gui.ImageViewer.ImageViewer', 'ImageViewer', (['self.parent'], {}), '(self.parent)\n', (148451, 148464), False, 'from gui.ImageViewer import ImageViewer\n'), ((149912, 149936), 'webbrowser.open', 'webbrowser.open', (['command'], {}), '(command)\n', (149927, 149936), False, 'import os, re, webbrowser, platform, zipfile, subprocess, config\n'), ((155828, 155847), 'util.HtmlGeneratorUtil.HtmlGeneratorUtil', 'HtmlGeneratorUtil', ([], {}), '()\n', (155845, 155847), False, 'from util.HtmlGeneratorUtil import HtmlGeneratorUtil\n'), ((156073, 156092), 'util.HtmlGeneratorUtil.HtmlGeneratorUtil', 'HtmlGeneratorUtil', ([], {}), '()\n', (156090, 156092), False, 'from util.HtmlGeneratorUtil import HtmlGeneratorUtil\n'), ((159257, 159306), 'config.verseNoSingleClickAction.startswith', 'config.verseNoSingleClickAction.startswith', (['"""_cp"""'], {}), "('_cp')\n", (159299, 159306), False, 'import os, re, webbrowser, platform, zipfile, subprocess, config\n'), ((167993, 168007), 'db.ToolsSqlite.SearchSqlite', 'SearchSqlite', ([], {}), '()\n', (168005, 168007), False, 'from db.ToolsSqlite import CrossReferenceSqlite, CollectionsSqlite, ImageSqlite, IndexesSqlite, EncyclopediaData, DictionaryData, ExlbData, SearchSqlite, Commentary, VerseData, WordData, BookData, Lexicon, LexiconData\n'), ((170861, 170883), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (170881, 170883), False, 'import openai, traceback, shutil\n'), ((172199, 172223), 'shutil.rmtree', 'shutil.rmtree', (['bible_dir'], {}), '(bible_dir)\n', (172212, 172223), False, 'import openai, traceback, shutil\n'), ((174045, 174067), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (174065, 174067), False, 'import openai, traceback, shutil\n'), ((179108, 179122), 'db.BiblesSqlite.BiblesSqlite', 'BiblesSqlite', ([], {}), '()\n', (179120, 179122), False, 'from db.BiblesSqlite import BiblesSqlite, Bible, ClauseData\n'), ((183412, 183513), 're.compile', 're.compile', (['"""\\\\[<ref onclick="searchBook\\\\(\'([^\']+?)\',\'([^\']+?)\'\\\\)">search</ref>\\\\]"""'], {}), '(\n \'\\\\[<ref onclick="searchBook\\\\(\\\'([^\\\']+?)\\\',\\\'([^\\\']+?)\\\'\\\\)">search</ref>\\\\]\'\n )\n', (183422, 183513), False, 'import os, re, webbrowser, platform, zipfile, subprocess, config\n'), ((185361, 185374), 'db.ToolsSqlite.LexiconData', 'LexiconData', ([], {}), '()\n', (185372, 185374), False, 'from db.ToolsSqlite import CrossReferenceSqlite, CollectionsSqlite, ImageSqlite, IndexesSqlite, EncyclopediaData, DictionaryData, ExlbData, SearchSqlite, Commentary, VerseData, WordData, BookData, Lexicon, LexiconData\n'), ((195201, 195355), 're.sub', 're.sub', (['"""<p align="center">\\\\[<ref onclick="website\\\\(\'(.*?)\'\\\\)">Click HERE for a Live Google Map</ref>\\\\]</p>"""', '"""[<ref>\\\\1</ref> ]"""', 'content'], {}), '(\n \'<p align="center">\\\\[<ref onclick="website\\\\(\\\'(.*?)\\\'\\\\)">Click HERE for a Live Google Map</ref>\\\\]</p>\'\n , 
\'[<ref>\\\\1</ref> ]\', content)\n', (195207, 195355), False, 'import os, re, webbrowser, platform, zipfile, subprocess, config\n'), ((230365, 230381), 'util.GithubUtil.GithubUtil', 'GithubUtil', (['repo'], {}), '(repo)\n', (230375, 230381), False, 'from util.GithubUtil import GithubUtil\n'), ((230455, 230497), 'os.path.join', 'os.path.join', (['config.marvelData', 'directory'], {}), '(config.marvelData, directory)\n', (230467, 230497), False, 'import os, re, webbrowser, platform, zipfile, subprocess, config\n'), ((230530, 230563), 'util.GithubUtil.GithubUtil.getShortname', 'GithubUtil.getShortname', (['filename'], {}), '(filename)\n', (230553, 230563), False, 'from util.GithubUtil import GithubUtil\n'), ((76749, 76787), 'os.path.join', 'os.path.join', (['formattedBiblesFolder', 'f'], {}), '(formattedBiblesFolder, f)\n', (76761, 76787), False, 'import os, re, webbrowser, platform, zipfile, subprocess, config\n'), ((76822, 76845), 're.search', 're.search', (['"""^[\\\\._]"""', 'f'], {}), "('^[\\\\._]', f)\n", (76831, 76845), False, 'import os, re, webbrowser, platform, zipfile, subprocess, config\n'), ((80742, 80783), 'os.path.isfile', 'os.path.isfile', (['config.audio_playing_file'], {}), '(config.audio_playing_file)\n', (80756, 80783), False, 'import os, re, webbrowser, platform, zipfile, subprocess, config\n'), ((82222, 82244), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (82242, 82244), False, 'import openai, traceback, shutil\n'), ((87178, 87192), 'util.TtsLanguages.TtsLanguages', 'TtsLanguages', ([], {}), '()\n', (87190, 87192), False, 'from util.TtsLanguages import TtsLanguages\n'), ((88593, 88607), 'util.TtsLanguages.TtsLanguages', 'TtsLanguages', ([], {}), '()\n', (88605, 88607), False, 'from util.TtsLanguages import TtsLanguages\n'), ((99101, 99126), 'os.path.isfile', 'os.path.isfile', (['audioFile'], {}), '(audioFile)\n', (99115, 99126), False, 'import os, re, webbrowser, platform, zipfile, subprocess, config\n'), ((99967, 99993), 'os.path.basename', 'os.path.basename', (['fullpath'], {}), '(fullpath)\n', (99983, 99993), False, 'import os, re, webbrowser, platform, zipfile, subprocess, config\n'), ((100060, 100079), 'util.HtmlGeneratorUtil.HtmlGeneratorUtil', 'HtmlGeneratorUtil', ([], {}), '()\n', (100077, 100079), False, 'from util.HtmlGeneratorUtil import HtmlGeneratorUtil\n'), ((100488, 100631), 'threading.Thread', 'threading.Thread', ([], {'target': 'self.parent.playAudioBibleFilePlayListPlusDisplayText', 'args': '(allPlayList, allTextList, False, playback_event)'}), '(target=self.parent.\n playAudioBibleFilePlayListPlusDisplayText, args=(allPlayList,\n allTextList, False, playback_event))\n', (100504, 100631), False, 'import glob, pprint, traceback, pydoc, threading, asyncio\n'), ((100696, 100785), 'threading.Thread', 'threading.Thread', ([], {'target': 'self.parent.playAudioBibleFilePlayList', 'args': '(allPlayList,)'}), '(target=self.parent.playAudioBibleFilePlayList, args=(\n allPlayList,))\n', (100712, 100785), False, 'import glob, pprint, traceback, pydoc, threading, asyncio\n'), ((101981, 102000), 'util.HtmlGeneratorUtil.HtmlGeneratorUtil', 'HtmlGeneratorUtil', ([], {}), '()\n', (101998, 102000), False, 'from util.HtmlGeneratorUtil import HtmlGeneratorUtil\n'), ((103123, 103142), 'util.HtmlGeneratorUtil.HtmlGeneratorUtil', 'HtmlGeneratorUtil', ([], {}), '()\n', (103140, 103142), False, 'from util.HtmlGeneratorUtil import HtmlGeneratorUtil\n'), ((103343, 103396), 'config.mainWindow.addToAudioPlayList', 'config.mainWindow.addToAudioPlayList', 
(['audioFile', '(True)'], {}), '(audioFile, True)\n', (103379, 103396), False, 'import os, re, webbrowser, platform, zipfile, subprocess, config\n'), ((104416, 104435), 'util.HtmlGeneratorUtil.HtmlGeneratorUtil', 'HtmlGeneratorUtil', ([], {}), '()\n', (104433, 104435), False, 'from util.HtmlGeneratorUtil import HtmlGeneratorUtil\n'), ((104636, 104689), 'config.mainWindow.addToAudioPlayList', 'config.mainWindow.addToAudioPlayList', (['audioFile', '(True)'], {}), '(audioFile, True)\n', (104672, 104689), False, 'import os, re, webbrowser, platform, zipfile, subprocess, config\n'), ((105809, 105828), 'util.HtmlGeneratorUtil.HtmlGeneratorUtil', 'HtmlGeneratorUtil', ([], {}), '()\n', (105826, 105828), False, 'from util.HtmlGeneratorUtil import HtmlGeneratorUtil\n'), ((106029, 106082), 'config.mainWindow.addToAudioPlayList', 'config.mainWindow.addToAudioPlayList', (['audioFile', '(True)'], {}), '(audioFile, True)\n', (106065, 106082), False, 'import os, re, webbrowser, platform, zipfile, subprocess, config\n'), ((110087, 110098), 'db.AGBTSData.AGBTSData', 'AGBTSData', ([], {}), '()\n', (110096, 110098), False, 'from db.AGBTSData import AGBTSData\n'), ((110674, 110685), 'db.AGBTSData.AGBTSData', 'AGBTSData', ([], {}), '()\n', (110683, 110685), False, 'from db.AGBTSData import AGBTSData\n'), ((110850, 110869), 'db.ToolsSqlite.CollectionsSqlite', 'CollectionsSqlite', ([], {}), '()\n', (110867, 110869), False, 'from db.ToolsSqlite import CrossReferenceSqlite, CollectionsSqlite, ImageSqlite, IndexesSqlite, EncyclopediaData, DictionaryData, ExlbData, SearchSqlite, Commentary, VerseData, WordData, BookData, Lexicon, LexiconData\n'), ((111059, 111078), 'db.ToolsSqlite.CollectionsSqlite', 'CollectionsSqlite', ([], {}), '()\n', (111076, 111078), False, 'from db.ToolsSqlite import CrossReferenceSqlite, CollectionsSqlite, ImageSqlite, IndexesSqlite, EncyclopediaData, DictionaryData, ExlbData, SearchSqlite, Commentary, VerseData, WordData, BookData, Lexicon, LexiconData\n'), ((111690, 111710), 'db.ToolsSqlite.Commentary', 'Commentary', (['"""Brooks"""'], {}), "('Brooks')\n", (111700, 111710), False, 'from db.ToolsSqlite import CrossReferenceSqlite, CollectionsSqlite, ImageSqlite, IndexesSqlite, EncyclopediaData, DictionaryData, ExlbData, SearchSqlite, Commentary, VerseData, WordData, BookData, Lexicon, LexiconData\n'), ((116949, 116961), 'util.BibleBooks.BibleBooks', 'BibleBooks', ([], {}), '()\n', (116959, 116961), False, 'from util.BibleBooks import BibleBooks\n'), ((118472, 118484), 'util.BibleBooks.BibleBooks', 'BibleBooks', ([], {}), '()\n', (118482, 118484), False, 'from util.BibleBooks import BibleBooks\n'), ((119802, 119814), 'util.BibleBooks.BibleBooks', 'BibleBooks', ([], {}), '()\n', (119812, 119814), False, 'from util.BibleBooks import BibleBooks\n'), ((125731, 125755), 'qtpy.QtWidgets.QApplication.clipboard', 'QApplication.clipboard', ([], {}), '()\n', (125753, 125755), False, 'from qtpy.QtWidgets import QApplication\n'), ((129529, 129540), 'db.BiblesSqlite.Bible', 'Bible', (['text'], {}), '(text)\n', (129534, 129540), False, 'from db.BiblesSqlite import BiblesSqlite, Bible, ClauseData\n'), ((130682, 130693), 'db.BiblesSqlite.Bible', 'Bible', (['text'], {}), '(text)\n', (130687, 130693), False, 'from db.BiblesSqlite import BiblesSqlite, Bible, ClauseData\n'), ((134741, 134777), 'os.path.join', 'os.path.join', (['*marvelBibles[text][0]'], {}), '(*marvelBibles[text][0])\n', (134753, 134777), False, 'import os, re, webbrowser, platform, zipfile, subprocess, config\n'), ((136445, 136481), 
'os.path.join', 'os.path.join', (['*marvelBibles[text][0]'], {}), '(*marvelBibles[text][0])\n', (136457, 136481), False, 'import os, re, webbrowser, platform, zipfile, subprocess, config\n'), ((138228, 138264), 'os.path.join', 'os.path.join', (['*marvelBibles[text][0]'], {}), '(*marvelBibles[text][0])\n', (138240, 138264), False, 'import os, re, webbrowser, platform, zipfile, subprocess, config\n'), ((147095, 147125), 'util.TextUtil.TextUtil.imageToText', 'TextUtil.imageToText', (['fullPath'], {}), '(fullPath)\n', (147115, 147125), False, 'from util.TextUtil import TextUtil\n'), ((147662, 147686), 'gui.ImageViewer.ImageViewer', 'ImageViewer', (['self.parent'], {}), '(self.parent)\n', (147673, 147686), False, 'from gui.ImageViewer import ImageViewer\n'), ((157287, 157306), 'util.HtmlGeneratorUtil.HtmlGeneratorUtil', 'HtmlGeneratorUtil', ([], {}), '()\n', (157304, 157306), False, 'from util.HtmlGeneratorUtil import HtmlGeneratorUtil\n'), ((159332, 159351), 'util.HtmlGeneratorUtil.HtmlGeneratorUtil', 'HtmlGeneratorUtil', ([], {}), '()\n', (159349, 159351), False, 'from util.HtmlGeneratorUtil import HtmlGeneratorUtil\n'), ((172641, 172652), 'db.BiblesSqlite.Bible', 'Bible', (['text'], {}), '(text)\n', (172646, 172652), False, 'from db.BiblesSqlite import BiblesSqlite, Bible, ClauseData\n'), ((172994, 173065), 'llama_index.SimpleDirectoryReader', 'SimpleDirectoryReader', (['bible_dir'], {'recursive': '(True)', 'required_exts': "['.md']"}), "(bible_dir, recursive=True, required_exts=['.md'])\n", (173015, 173065), False, 'from llama_index import SimpleDirectoryReader, ServiceContext, GPTVectorStoreIndex, StorageContext, load_index_from_storage\n'), ((181505, 181529), 'qtpy.QtWidgets.QApplication.clipboard', 'QApplication.clipboard', ([], {}), '()\n', (181527, 181529), False, 'from qtpy.QtWidgets import QApplication\n'), ((183907, 184008), 're.compile', 're.compile', (['"""\\\\[<ref onclick="searchCode\\\\(\'([^\']+?)\',\'([^\']+?)\'\\\\)">search</ref>\\\\]"""'], {}), '(\n \'\\\\[<ref onclick="searchCode\\\\(\\\'([^\\\']+?)\\\',\\\'([^\\\']+?)\\\'\\\\)">search</ref>\\\\]\'\n )\n', (183917, 184008), False, 'import os, re, webbrowser, platform, zipfile, subprocess, config\n'), ((184587, 184699), 're.compile', 're.compile', (['"""<u><b><ref onclick="(rmac|etcbcmorph|lxxmorph)\\\\(\'([^\']+?)\'\\\\)">[^<>]*?</ref></b></u>"""'], {}), '(\n \'<u><b><ref onclick="(rmac|etcbcmorph|lxxmorph)\\\\(\\\'([^\\\']+?)\\\'\\\\)">[^<>]*?</ref></b></u>\'\n )\n', (184597, 184699), False, 'import os, re, webbrowser, platform, zipfile, subprocess, config\n'), ((192791, 192809), 'config.help.keys', 'config.help.keys', ([], {}), '()\n', (192807, 192809), False, 'import os, re, webbrowser, platform, zipfile, subprocess, config\n'), ((193521, 193539), 'config.help.keys', 'config.help.keys', ([], {}), '()\n', (193537, 193539), False, 'import os, re, webbrowser, platform, zipfile, subprocess, config\n'), ((203352, 203374), 'db.BiblesSqlite.Bible', 'Bible', (['config.mainText'], {}), '(config.mainText)\n', (203357, 203374), False, 'from db.BiblesSqlite import BiblesSqlite, Bible, ClauseData\n'), ((211823, 211850), 're.sub', 're.sub', (['"""\\\\..*?$"""', '""""""', 'item'], {}), "('\\\\..*?$', '', item)\n", (211829, 211850), False, 'import os, re, webbrowser, platform, zipfile, subprocess, config\n'), ((219116, 219163), 're.sub', 're.sub', (['"""(<pm>|</pm>|<n>|</n>)"""', '""""""', 'subContent'], {}), "('(<pm>|</pm>|<n>|</n>)', '', subContent)\n", (219122, 219163), False, 'import os, re, webbrowser, platform, zipfile, 
subprocess, config\n'), ((228527, 228557), 'os.path.join', 'os.path.join', (['*databaseInfo[0]'], {}), '(*databaseInfo[0])\n', (228539, 228557), False, 'import os, re, webbrowser, platform, zipfile, subprocess, config\n'), ((230647, 230682), 'os.path.join', 'os.path.join', (['folder', 'shortFilename'], {}), '(folder, shortFilename)\n', (230659, 230682), False, 'import os, re, webbrowser, platform, zipfile, subprocess, config\n'), ((230850, 230894), 'os.path.join', 'os.path.join', (['folder', "(shortFilename + '.zip')"], {}), "(folder, shortFilename + '.zip')\n", (230862, 230894), False, 'import os, re, webbrowser, platform, zipfile, subprocess, config\n'), ((231483, 231498), 'os.remove', 'os.remove', (['file'], {}), '(file)\n', (231492, 231498), False, 'import os, re, webbrowser, platform, zipfile, subprocess, config\n'), ((69593, 69631), 'os.path.join', 'os.path.join', (['formattedBiblesFolder', 'f'], {}), '(formattedBiblesFolder, f)\n', (69605, 69631), False, 'import os, re, webbrowser, platform, zipfile, subprocess, config\n'), ((69666, 69689), 're.search', 're.search', (['"""^[\\\\._]"""', 'f'], {}), "('^[\\\\._]', f)\n", (69675, 69689), False, 'import os, re, webbrowser, platform, zipfile, subprocess, config\n'), ((97745, 97763), 'asyncio.sleep', 'asyncio.sleep', (['(0.1)'], {}), '(0.1)\n', (97758, 97763), False, 'import glob, pprint, traceback, pydoc, threading, asyncio\n'), ((98684, 98695), 'db.BiblesSqlite.Bible', 'Bible', (['text'], {}), '(text)\n', (98689, 98695), False, 'from db.BiblesSqlite import BiblesSqlite, Bible, ClauseData\n'), ((102359, 102378), 'util.HtmlGeneratorUtil.HtmlGeneratorUtil', 'HtmlGeneratorUtil', ([], {}), '()\n', (102376, 102378), False, 'from util.HtmlGeneratorUtil import HtmlGeneratorUtil\n'), ((103469, 103570), 'util.VlcUtil.VlcUtil.playMediaFile', 'VlcUtil.playMediaFile', (['audioFile', 'config.vlcSpeed', '(not config.hideVlcInterfaceReadingSingleVerse)'], {}), '(audioFile, config.vlcSpeed, not config.\n hideVlcInterfaceReadingSingleVerse)\n', (103490, 103570), False, 'from util.VlcUtil import VlcUtil\n'), ((104762, 104818), 'util.VlcUtil.VlcUtil.playMediaFile', 'VlcUtil.playMediaFile', (['audioFile', 'config.vlcSpeed', '(False)'], {}), '(audioFile, config.vlcSpeed, False)\n', (104783, 104818), False, 'from util.VlcUtil import VlcUtil\n'), ((106155, 106211), 'util.VlcUtil.VlcUtil.playMediaFile', 'VlcUtil.playMediaFile', (['audioFile', 'config.vlcSpeed', '(False)'], {}), '(audioFile, config.vlcSpeed, False)\n', (106176, 106211), False, 'from util.VlcUtil import VlcUtil\n'), ((112813, 112824), 'db.BiblesSqlite.Bible', 'Bible', (['text'], {}), '(text)\n', (112818, 112824), False, 'from db.BiblesSqlite import BiblesSqlite, Bible, ClauseData\n'), ((121639, 121651), 'util.BibleBooks.BibleBooks', 'BibleBooks', ([], {}), '()\n', (121649, 121651), False, 'from util.BibleBooks import BibleBooks\n'), ((139903, 139915), 'db.NoteSqlite.NoteSqlite', 'NoteSqlite', ([], {}), '()\n', (139913, 139915), False, 'from db.NoteSqlite import NoteSqlite\n'), ((140774, 140786), 'db.NoteSqlite.NoteSqlite', 'NoteSqlite', ([], {}), '()\n', (140784, 140786), False, 'from db.NoteSqlite import NoteSqlite\n'), ((141688, 141700), 'db.NoteSqlite.NoteSqlite', 'NoteSqlite', ([], {}), '()\n', (141698, 141700), False, 'from db.NoteSqlite import NoteSqlite\n'), ((147263, 147293), 'util.TextUtil.TextUtil.imageToText', 'TextUtil.imageToText', (['filePath'], {}), '(filePath)\n', (147283, 147293), False, 'from util.TextUtil import TextUtil\n'), ((171899, 171913), 'db.BiblesSqlite.BiblesSqlite', 
'BiblesSqlite', ([], {}), '()\n', (171911, 171913), False, 'from db.BiblesSqlite import BiblesSqlite, Bible, ClauseData\n'), ((215144, 215166), 'os.path.basename', 'os.path.basename', (['file'], {}), '(file)\n', (215160, 215166), False, 'import os, re, webbrowser, platform, zipfile, subprocess, config\n'), ((219833, 219998), 're.sub', 're.sub', (['"""(<ref onclick="document.title=\')READWORD(.*?)(<tlit>[^<>]*?</tlit><br><hlr><heb>[^<>]+?</heb>)"""', '"""\\\\1READWORD\\\\2\\\\3 \\\\1READLEXEME\\\\2"""', 'subContent'], {}), '(\n \'(<ref onclick="document.title=\\\')READWORD(.*?)(<tlit>[^<>]*?</tlit><br><hlr><heb>[^<>]+?</heb>)\'\n , \'\\\\1READWORD\\\\2\\\\3 \\\\1READLEXEME\\\\2\', subContent)\n', (219839, 219998), False, 'import os, re, webbrowser, platform, zipfile, subprocess, config\n'), ((220059, 220224), 're.sub', 're.sub', (['"""(<ref onclick="document.title=\')READWORD(.*?)(<tlit>[^<>]*?</tlit><br><hlr><grk>[^<>]+?</grk>)"""', '"""\\\\1READWORD\\\\2\\\\3 \\\\1READLEXEME\\\\2"""', 'subContent'], {}), '(\n \'(<ref onclick="document.title=\\\')READWORD(.*?)(<tlit>[^<>]*?</tlit><br><hlr><grk>[^<>]+?</grk>)\'\n , \'\\\\1READWORD\\\\2\\\\3 \\\\1READLEXEME\\\\2\', subContent)\n', (220065, 220224), False, 'import os, re, webbrowser, platform, zipfile, subprocess, config\n'), ((225973, 225995), 'os.path.basename', 'os.path.basename', (['file'], {}), '(file)\n', (225989, 225995), False, 'import os, re, webbrowser, platform, zipfile, subprocess, config\n'), ((231375, 231401), 'zipfile.ZipFile', 'zipfile.ZipFile', (['file', '"""r"""'], {}), "(file, 'r')\n", (231390, 231401), False, 'import os, re, webbrowser, platform, zipfile, subprocess, config\n'), ((54049, 54103), 're.sub', 're.sub', (['""" [0-9]+?:[0-9]+?$"""', '""""""', 'currentBibleReference'], {}), "(' [0-9]+?:[0-9]+?$', '', currentBibleReference)\n", (54055, 54103), False, 'import os, re, webbrowser, platform, zipfile, subprocess, config\n'), ((88660, 88674), 'util.TtsLanguages.TtsLanguages', 'TtsLanguages', ([], {}), '()\n', (88672, 88674), False, 'from util.TtsLanguages import TtsLanguages\n'), ((132503, 132539), 'os.path.join', 'os.path.join', (['*marvelBibles[text][0]'], {}), '(*marvelBibles[text][0])\n', (132515, 132539), False, 'import os, re, webbrowser, platform, zipfile, subprocess, config\n'), ((133775, 133786), 'db.BiblesSqlite.Bible', 'Bible', (['text'], {}), '(text)\n', (133780, 133786), False, 'from db.BiblesSqlite import BiblesSqlite, Bible, ClauseData\n'), ((86845, 86868), 'util.HebrewTransliteration.HebrewTransliteration', 'HebrewTransliteration', ([], {}), '()\n', (86866, 86868), False, 'from util.HebrewTransliteration import HebrewTransliteration\n'), ((158001, 158013), 'util.BibleBooks.BibleBooks', 'BibleBooks', ([], {}), '()\n', (158011, 158013), False, 'from util.BibleBooks import BibleBooks\n'), ((99349, 99360), 'db.BiblesSqlite.Bible', 'Bible', (['text'], {}), '(text)\n', (99354, 99360), False, 'from db.BiblesSqlite import BiblesSqlite, Bible, ClauseData\n')] |
import os
import pickle
import requests
from flask import Flask, jsonify, request
from flask_cors import CORS
from google.auth.transport.requests import Request
from google_auth_oauthlib.flow import InstalledAppFlow
from llama_index import GPTSimpleVectorIndex, download_loader
os.environ['OPENAI_API_KEY'] = 'xxxx'
def authorize_gdocs():
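    # Descriptive note (added): obtains or refreshes read-only Google Docs OAuth
    # credentials and caches them in token.pickle for later runs.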
google_oauth2_scopes = [
"https://www.googleapis.com/auth/documents.readonly"
]
cred = None
if os.path.exists("token.pickle"):
with open("token.pickle", 'rb') as token:
cred = pickle.load(token)
if not cred or not cred.valid:
if cred and cred.expired and cred.refresh_token:
cred.refresh(Request())
else:
flow = InstalledAppFlow.from_client_secrets_file("credentials.json", google_oauth2_scopes)
cred = flow.run_local_server(port=0)
with open("token.pickle", 'wb') as token:
pickle.dump(cred, token)
app = Flask(__name__)
CORS(app)
@app.route('/predict', methods=['POST'])
def predict():
data = request.json
print(data)
prompt = data['prompt']
    response = index.query(prompt)
    # index.query returns a Response object, which is not JSON-serializable; send its text instead
    return jsonify({'response': str(response)})
if __name__ == '__main__':
authorize_gdocs()
GoogleDocsReader = download_loader('GoogleDocsReader')
gdoc_ids = ['xxxx']
loader = GoogleDocsReader()
documents = loader.load_data(document_ids=gdoc_ids)
index = GPTSimpleVectorIndex(documents)
app.run(port=8000)
| [
"llama_index.GPTSimpleVectorIndex",
"llama_index.download_loader"
] | [((969, 984), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (974, 984), False, 'from flask import Flask, jsonify, request\n'), ((985, 994), 'flask_cors.CORS', 'CORS', (['app'], {}), '(app)\n', (989, 994), False, 'from flask_cors import CORS\n'), ((461, 491), 'os.path.exists', 'os.path.exists', (['"""token.pickle"""'], {}), "('token.pickle')\n", (475, 491), False, 'import os\n'), ((1166, 1197), 'flask.jsonify', 'jsonify', (["{'response': response}"], {}), "({'response': response})\n", (1173, 1197), False, 'from flask import Flask, jsonify, request\n'), ((1271, 1306), 'llama_index.download_loader', 'download_loader', (['"""GoogleDocsReader"""'], {}), "('GoogleDocsReader')\n", (1286, 1306), False, 'from llama_index import GPTSimpleVectorIndex, download_loader\n'), ((1431, 1462), 'llama_index.GPTSimpleVectorIndex', 'GPTSimpleVectorIndex', (['documents'], {}), '(documents)\n', (1451, 1462), False, 'from llama_index import GPTSimpleVectorIndex, download_loader\n'), ((562, 580), 'pickle.load', 'pickle.load', (['token'], {}), '(token)\n', (573, 580), False, 'import pickle\n'), ((742, 829), 'google_auth_oauthlib.flow.InstalledAppFlow.from_client_secrets_file', 'InstalledAppFlow.from_client_secrets_file', (['"""credentials.json"""', 'google_oauth2_scopes'], {}), "('credentials.json',\n google_oauth2_scopes)\n", (783, 829), False, 'from google_auth_oauthlib.flow import InstalledAppFlow\n'), ((937, 961), 'pickle.dump', 'pickle.dump', (['cred', 'token'], {}), '(cred, token)\n', (948, 961), False, 'import pickle\n'), ((698, 707), 'google.auth.transport.requests.Request', 'Request', ([], {}), '()\n', (705, 707), False, 'from google.auth.transport.requests import Request\n')] |
import os
import zipfile
from pathlib import Path
from dotenv import load_dotenv
from llama_index.core import (KnowledgeGraphIndex, SimpleDirectoryReader,
StorageContext, load_index_from_storage)
from llama_index.graph_stores.neo4j import Neo4jGraphStore
from llama_index.llms.openai import OpenAI
from transformations.docs_to_llama_index import custom_docs_to_llama_index
env_path = Path(__file__).resolve().parent.parent / ".env"
load_dotenv(dotenv_path=env_path)
neoURI = os.getenv("NEO4J_URI")
neoPass = os.getenv("NEO4J_PASSWORD")
import logging
import sys
import gdown
from llama_index.core import Settings
from llama_index.llms.openai import OpenAI
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
llm = OpenAI(temperature=0, model="gpt-3.5-turbo", api_key=os.getenv("OPENAI_API_KEY"))
Settings.llm = llm
Settings.chunk_size = 512
def download_and_extract_file_from_google_drive(gdrive_url, destination, extract_to):
# Use gdown to download the file
os.makedirs(extract_to, exist_ok=True)
gdown.download(gdrive_url, destination, quiet=False)
print("File downloaded successfully")
with zipfile.ZipFile(destination, 'r') as zip_ref:
zip_ref.extractall(extract_to)
print("File extracted successfully")
os.remove(destination)
print("Zip file removed")
def create_neo4j_graph_store():
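    # Descriptive note (added): builds a KnowledgeGraphIndex over the documents in
    # ./retrievers/data/chunks, persisting the graph to Neo4j and the index
    # metadata to ./retrievers/neo/storage.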
username = "neo4j"
password = neoPass
url = neoURI
database = "neo4j"
graph_store = Neo4jGraphStore(
username=username,
password=password,
url=url,
database=database,
)
storage_context = StorageContext.from_defaults(graph_store=graph_store)
documents = SimpleDirectoryReader(
"./retrievers/data/chunks", filename_as_id=True
).load_data()
index = KnowledgeGraphIndex.from_documents(
documents,
storage_context=storage_context,
max_triplets_per_chunk=2,
include_embeddings=True,
)
print("finished saving nodes")
index.storage_context.persist(persist_dir="./retrievers/neo/storage")
print("index_saved")
def get_neo4j_retriever(query, index, customDocuments):
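    # Descriptive note (added): if caller-supplied documents are passed in, a fresh
    # in-memory KnowledgeGraphIndex is built from them; otherwise the pre-built
    # index is queried. The response text and its formatted sources are returned.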
if len(customDocuments) > 0:
documents = custom_docs_to_llama_index(customDocuments)
index = KnowledgeGraphIndex.from_documents(
documents,
max_triplets_per_chunk=2,
include_embeddings=True,
)
query_engine = index.as_query_engine(
include_text=True,
response_mode="tree_summarize",
embedding_mode="hybrid",
)
else:
query_engine = index.as_query_engine(
include_text=True,
response_mode="tree_summarize",
embedding_mode="hybrid",
)
response = query_engine.query(query)
content_metadata_pairs = [
{"content": response.response, "metadata": response.get_formatted_sources()}
]
return {"documents": content_metadata_pairs}
storage_dir = "./retrievers/neo/storage"
if not os.path.exists(storage_dir) or not os.listdir(storage_dir):
if os.getenv("CREATE_NEO4J_GRAPH_STORE") == "true":
create_neo4j_graph_store()
else:
gdrive_url = 'https://drive.google.com/uc?id=1ZsA3cfKOSPrQI9WKCVF85UsNPxqf4s3z'
destination = os.path.join(storage_dir, 'neo4j_graph_store.zip') # Assuming the file is a zip
download_and_extract_file_from_google_drive(gdrive_url, destination, storage_dir) | [
"llama_index.core.KnowledgeGraphIndex.from_documents",
"llama_index.core.StorageContext.from_defaults",
"llama_index.core.SimpleDirectoryReader",
"llama_index.graph_stores.neo4j.Neo4jGraphStore"
] | [((464, 497), 'dotenv.load_dotenv', 'load_dotenv', ([], {'dotenv_path': 'env_path'}), '(dotenv_path=env_path)\n', (475, 497), False, 'from dotenv import load_dotenv\n'), ((508, 530), 'os.getenv', 'os.getenv', (['"""NEO4J_URI"""'], {}), "('NEO4J_URI')\n", (517, 530), False, 'import os\n'), ((541, 568), 'os.getenv', 'os.getenv', (['"""NEO4J_PASSWORD"""'], {}), "('NEO4J_PASSWORD')\n", (550, 568), False, 'import os\n'), ((692, 750), 'logging.basicConfig', 'logging.basicConfig', ([], {'stream': 'sys.stdout', 'level': 'logging.INFO'}), '(stream=sys.stdout, level=logging.INFO)\n', (711, 750), False, 'import logging\n'), ((1013, 1051), 'os.makedirs', 'os.makedirs', (['extract_to'], {'exist_ok': '(True)'}), '(extract_to, exist_ok=True)\n', (1024, 1051), False, 'import os\n'), ((1057, 1109), 'gdown.download', 'gdown.download', (['gdrive_url', 'destination'], {'quiet': '(False)'}), '(gdrive_url, destination, quiet=False)\n', (1071, 1109), False, 'import gdown\n'), ((1294, 1316), 'os.remove', 'os.remove', (['destination'], {}), '(destination)\n', (1303, 1316), False, 'import os\n'), ((1488, 1574), 'llama_index.graph_stores.neo4j.Neo4jGraphStore', 'Neo4jGraphStore', ([], {'username': 'username', 'password': 'password', 'url': 'url', 'database': 'database'}), '(username=username, password=password, url=url, database=\n database)\n', (1503, 1574), False, 'from llama_index.graph_stores.neo4j import Neo4jGraphStore\n'), ((1632, 1685), 'llama_index.core.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {'graph_store': 'graph_store'}), '(graph_store=graph_store)\n', (1660, 1685), False, 'from llama_index.core import KnowledgeGraphIndex, SimpleDirectoryReader, StorageContext, load_index_from_storage\n'), ((1813, 1947), 'llama_index.core.KnowledgeGraphIndex.from_documents', 'KnowledgeGraphIndex.from_documents', (['documents'], {'storage_context': 'storage_context', 'max_triplets_per_chunk': '(2)', 'include_embeddings': '(True)'}), '(documents, storage_context=\n storage_context, max_triplets_per_chunk=2, include_embeddings=True)\n', (1847, 1947), False, 'from llama_index.core import KnowledgeGraphIndex, SimpleDirectoryReader, StorageContext, load_index_from_storage\n'), ((811, 838), 'os.getenv', 'os.getenv', (['"""OPENAI_API_KEY"""'], {}), "('OPENAI_API_KEY')\n", (820, 838), False, 'import os\n'), ((1163, 1196), 'zipfile.ZipFile', 'zipfile.ZipFile', (['destination', '"""r"""'], {}), "(destination, 'r')\n", (1178, 1196), False, 'import zipfile\n'), ((2230, 2273), 'transformations.docs_to_llama_index.custom_docs_to_llama_index', 'custom_docs_to_llama_index', (['customDocuments'], {}), '(customDocuments)\n', (2256, 2273), False, 'from transformations.docs_to_llama_index import custom_docs_to_llama_index\n'), ((2291, 2391), 'llama_index.core.KnowledgeGraphIndex.from_documents', 'KnowledgeGraphIndex.from_documents', (['documents'], {'max_triplets_per_chunk': '(2)', 'include_embeddings': '(True)'}), '(documents, max_triplets_per_chunk=2,\n include_embeddings=True)\n', (2325, 2391), False, 'from llama_index.core import KnowledgeGraphIndex, SimpleDirectoryReader, StorageContext, load_index_from_storage\n'), ((3046, 3073), 'os.path.exists', 'os.path.exists', (['storage_dir'], {}), '(storage_dir)\n', (3060, 3073), False, 'import os\n'), ((3081, 3104), 'os.listdir', 'os.listdir', (['storage_dir'], {}), '(storage_dir)\n', (3091, 3104), False, 'import os\n'), ((3113, 3150), 'os.getenv', 'os.getenv', (['"""CREATE_NEO4J_GRAPH_STORE"""'], {}), "('CREATE_NEO4J_GRAPH_STORE')\n", (3122, 3150), False, 
'import os\n'), ((3317, 3367), 'os.path.join', 'os.path.join', (['storage_dir', '"""neo4j_graph_store.zip"""'], {}), "(storage_dir, 'neo4j_graph_store.zip')\n", (3329, 3367), False, 'import os\n'), ((1703, 1773), 'llama_index.core.SimpleDirectoryReader', 'SimpleDirectoryReader', (['"""./retrievers/data/chunks"""'], {'filename_as_id': '(True)'}), "('./retrievers/data/chunks', filename_as_id=True)\n", (1724, 1773), False, 'from llama_index.core import KnowledgeGraphIndex, SimpleDirectoryReader, StorageContext, load_index_from_storage\n'), ((416, 430), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (420, 430), False, 'from pathlib import Path\n')] |
import tempfile, os
from routers.utils.engine import Engine
from llama_index import download_loader
from fastapi import APIRouter, UploadFile, HTTPException
engine = Engine()
router = APIRouter()
@router.post("/pdf")
async def file(upload_file: UploadFile, namespace: str):
"""
Loader: https://llamahub.ai/l/file-pymu_pdf
"""
file_preview_name, file_extension = os.path.splitext(upload_file.filename)
if file_extension != '.pdf':
raise HTTPException(status_code=400, detail="File must be a PDF")
with tempfile.NamedTemporaryFile(delete=True, prefix=file_preview_name + '_', suffix=".pdf") as temp_file:
content = await upload_file.read()
        temp_file.write(content)
        temp_file.flush()  # make sure the PDF bytes are on disk before the loader reads the file
PyMuPDFReader = download_loader("PyMuPDFReader")
loader = PyMuPDFReader().load(file_path=temp_file.name, metadata=True)
engine.load(loader, namespace)
return {'message': 'File uploaded successfully', 'filename': upload_file.filename, "namespace": namespace} | [
"llama_index.download_loader"
] | [((167, 175), 'routers.utils.engine.Engine', 'Engine', ([], {}), '()\n', (173, 175), False, 'from routers.utils.engine import Engine\n'), ((185, 196), 'fastapi.APIRouter', 'APIRouter', ([], {}), '()\n', (194, 196), False, 'from fastapi import APIRouter, UploadFile, HTTPException\n'), ((380, 418), 'os.path.splitext', 'os.path.splitext', (['upload_file.filename'], {}), '(upload_file.filename)\n', (396, 418), False, 'import tempfile, os\n'), ((466, 525), 'fastapi.HTTPException', 'HTTPException', ([], {'status_code': '(400)', 'detail': '"""File must be a PDF"""'}), "(status_code=400, detail='File must be a PDF')\n", (479, 525), False, 'from fastapi import APIRouter, UploadFile, HTTPException\n'), ((540, 631), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', ([], {'delete': '(True)', 'prefix': "(file_preview_name + '_')", 'suffix': '""".pdf"""'}), "(delete=True, prefix=file_preview_name + '_',\n suffix='.pdf')\n", (567, 631), False, 'import tempfile, os\n'), ((742, 774), 'llama_index.download_loader', 'download_loader', (['"""PyMuPDFReader"""'], {}), "('PyMuPDFReader')\n", (757, 774), False, 'from llama_index import download_loader\n')] |
import os
from llama_index import GPTSimpleVectorIndex, SimpleDirectoryReader, download_loader
from llama_index import Document, LLMPredictor, PromptHelper, QuestionAnswerPrompt, JSONReader
from langchain.llms import OpenAIChat, OpenAI
from zipfile import ZipFile
from googlesearch import search as google_search
from baidusearch.baidusearch import search as baidu_search
import traceback
import openai
from utils import *
def save_index(index, index_name, exist_ok=False):
file_path = f"./index/{index_name}.json"
if not os.path.exists(file_path) or exist_ok:
index.save_to_disk(file_path)
print(f'Saved file "{file_path}".')
else:
i = 1
while True:
new_file_path = f'{os.path.splitext(file_path)[0]}_{i}{os.path.splitext(file_path)[1]}'
if not os.path.exists(new_file_path):
index.save_to_disk(new_file_path)
print(f'Saved file "{new_file_path}".')
break
i += 1
def construct_index(api_key, file_list, index_name, max_input_size=4096, num_outputs=512, max_chunk_overlap=20, raw=False):
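    # Descriptive note (added): loads PDF/DOCX/EPUB files through LlamaHub loaders
    # (plain text otherwise), builds a GPTSimpleVectorIndex with a gpt-3.5-turbo
    # predictor, and persists it under ./index/; with raw=True the given documents
    # are used as-is and an existing index file of the same name may be overwritten.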
documents = []
if not raw:
txt_set = []
for file in file_list:
if os.path.splitext(file.name)[1] == '.pdf':
CJKPDFReader = download_loader("CJKPDFReader")
loader = CJKPDFReader()
documents += loader.load_data(file=file.name)
elif os.path.splitext(file.name)[1] == '.docx':
DocxReader = download_loader("DocxReader")
loader = DocxReader()
documents += loader.load_data(file=file.name)
elif os.path.splitext(file.name)[1] == '.epub':
EpubReader = download_loader("EpubReader")
loader = EpubReader()
documents += loader.load_data(file=file.name)
else:
with open(file.name, 'r', encoding="utf-8") as f:
txt_set.append(f.read())
documents += [Document(k) for k in txt_set]
else:
documents += [Document(k.text.encode("UTF-8", errors="strict").decode()) for k in file_list]
# Customizing LLM
llm_predictor = LLMPredictor(llm=OpenAI(temperature=0, model_name="gpt-3.5-turbo", openai_api_key=api_key))
prompt_helper = PromptHelper(max_input_size, num_outputs, max_chunk_overlap)
index = GPTSimpleVectorIndex(documents, llm_predictor=llm_predictor, prompt_helper=prompt_helper)
if not raw:
save_index(index, index_name)
newlist = refresh_json_list(plain=True)
return newlist, newlist
else:
save_index(index, index_name, exist_ok=True)
return index
def chat_ai(api_key, index_select, question, prompt_tmpl, sim_k, chat_tone ,context, chatbot, search_mode=[], suggested_user_question = ""):
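    # Descriptive note (added): chat_tone maps to sampling temperature; when search
    # engines are selected, the question is reduced to search terms, the result
    # pages are scraped and indexed on the fly, and the answer is generated against
    # that temporary index.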
os.environ["OPENAI_API_KEY"] = api_key
print(f"Question: {question}")
if question=="":
question = suggested_user_question
if chat_tone == 0:
temprature = 2
elif chat_tone == 1:
temprature = 1
else:
temprature = 0.5
if not search_mode:
response = ask_ai(api_key, index_select, question, prompt_tmpl, sim_k, temprature, context)
else:
print(f"You asked: {question}")
BeautifulSoupWebReader = download_loader("BeautifulSoupWebReader")
loader = BeautifulSoupWebReader()
chat = OpenAI(model_name="gpt-3.5-turbo", openai_api_key=api_key)
search_terms = chat.generate([f"Please extract search terms from the user’s question. The search terms is a concise sentence, which will be searched on Google to obtain relevant information to answer the user’s question, too generalized search terms doesn’t help. Please provide no more than two search terms. Please provide the most relevant search terms only, the search terms should directly correspond to the user’s question. Please separate different search items with commas, with no quote marks. The user’s question is: {question}"]).generations[0][0].text.strip()
search_terms = search_terms.replace('"', '')
search_terms = search_terms.replace(".", "")
links = []
for keywords in search_terms.split(","):
keywords = keywords.strip()
for search_engine in search_mode:
if "Google" in search_engine:
print(f"Googling: {keywords}")
search_iter = google_search(keywords, num_results=5)
links += [next(search_iter) for _ in range(10)]
if "Baidu" in search_engine:
print(f"Baiduing: {keywords}")
search_results = baidu_search(keywords, num_results=5)
links += [i["url"] for i in search_results if i["url"].startswith("http") and (not "@" in i["url"])]
if "Manual" in search_engine:
print(f"Searching manually: {keywords}")
print("Please input links manually. (Enter 'q' to quit.)")
while True:
                        link = input("Please enter a link manually:\n")
if link == "q":
break
else:
links.append(link)
links = list(set(links))
if len(links) == 0:
msg = "No links found."
print(msg)
chatbot.append((question, msg))
return context, chatbot, gr.Dropdown.update(choices=[])
print("Extracting data from links...")
print('\n'.join(links))
documents = loader.load_data(urls=links)
# convert to utf-8 encoding
index = construct_index(api_key, documents, " ".join(search_terms.split(",")), raw=True)
print("Generating response...")
response = ask_ai(api_key, index_select, question, prompt_tmpl, sim_k, temprature, context, raw = index)
response = response.split("\n")
suggested_next_turns = []
for index, line in enumerate(response):
if "next user turn" in line:
suggested_next_turns = response[index+1:]
response = response[:index]
break
suggested_next_turns = [i.split()[1] for i in suggested_next_turns]
response = "\n".join(response)
response = parse_text(response)
context.append({"role": "user", "content": question})
context.append({"role": "assistant", "content": response})
chatbot.append((question, response))
os.environ["OPENAI_API_KEY"] = ""
return context, chatbot, gr.Dropdown.update(choices=suggested_next_turns)
def ask_ai(api_key, index_select, question, prompt_tmpl, sim_k=1, temprature=0, prefix_messages=[], raw = None):
os.environ["OPENAI_API_KEY"] = api_key
if raw is not None:
index = raw
else:
index = load_index(index_select)
prompt = QuestionAnswerPrompt(prompt_tmpl)
llm_predictor = LLMPredictor(llm=OpenAI(temperature=temprature, model_name="gpt-3.5-turbo", openai_api_key=api_key, prefix_messages=prefix_messages))
try:
response = index.query(question, llm_predictor=llm_predictor, similarity_top_k=sim_k, text_qa_template=prompt)
except:
traceback.print_exc()
return ""
print(f"Response: {response.response}")
os.environ["OPENAI_API_KEY"] = ""
return response.response
def load_index(index_name):
index_path = f"./index/{index_name}.json"
if not os.path.exists(index_path):
return None
index = GPTSimpleVectorIndex.load_from_disk(index_path)
return index
def display_json(json_select):
json_path = f"./index/{json_select}.json"
if not os.path.exists(json_path):
return None
documents = JSONReader().load_data(f"./index/{json_select}.json")
return documents[0]
| [
"llama_index.GPTSimpleVectorIndex",
"llama_index.download_loader",
"llama_index.QuestionAnswerPrompt",
"llama_index.PromptHelper",
"llama_index.GPTSimpleVectorIndex.load_from_disk",
"llama_index.JSONReader",
"llama_index.Document"
] | [((2322, 2382), 'llama_index.PromptHelper', 'PromptHelper', (['max_input_size', 'num_outputs', 'max_chunk_overlap'], {}), '(max_input_size, num_outputs, max_chunk_overlap)\n', (2334, 2382), False, 'from llama_index import Document, LLMPredictor, PromptHelper, QuestionAnswerPrompt, JSONReader\n'), ((2396, 2490), 'llama_index.GPTSimpleVectorIndex', 'GPTSimpleVectorIndex', (['documents'], {'llm_predictor': 'llm_predictor', 'prompt_helper': 'prompt_helper'}), '(documents, llm_predictor=llm_predictor, prompt_helper=\n prompt_helper)\n', (2416, 2490), False, 'from llama_index import GPTSimpleVectorIndex, SimpleDirectoryReader, download_loader\n'), ((6873, 6906), 'llama_index.QuestionAnswerPrompt', 'QuestionAnswerPrompt', (['prompt_tmpl'], {}), '(prompt_tmpl)\n', (6893, 6906), False, 'from llama_index import Document, LLMPredictor, PromptHelper, QuestionAnswerPrompt, JSONReader\n'), ((7511, 7558), 'llama_index.GPTSimpleVectorIndex.load_from_disk', 'GPTSimpleVectorIndex.load_from_disk', (['index_path'], {}), '(index_path)\n', (7546, 7558), False, 'from llama_index import GPTSimpleVectorIndex, SimpleDirectoryReader, download_loader\n'), ((3327, 3368), 'llama_index.download_loader', 'download_loader', (['"""BeautifulSoupWebReader"""'], {}), "('BeautifulSoupWebReader')\n", (3342, 3368), False, 'from llama_index import GPTSimpleVectorIndex, SimpleDirectoryReader, download_loader\n'), ((3426, 3484), 'langchain.llms.OpenAI', 'OpenAI', ([], {'model_name': '"""gpt-3.5-turbo"""', 'openai_api_key': 'api_key'}), "(model_name='gpt-3.5-turbo', openai_api_key=api_key)\n", (3432, 3484), False, 'from langchain.llms import OpenAIChat, OpenAI\n'), ((7450, 7476), 'os.path.exists', 'os.path.exists', (['index_path'], {}), '(index_path)\n', (7464, 7476), False, 'import os\n'), ((7665, 7690), 'os.path.exists', 'os.path.exists', (['json_path'], {}), '(json_path)\n', (7679, 7690), False, 'import os\n'), ((533, 558), 'os.path.exists', 'os.path.exists', (['file_path'], {}), '(file_path)\n', (547, 558), False, 'import os\n'), ((2227, 2300), 'langchain.llms.OpenAI', 'OpenAI', ([], {'temperature': '(0)', 'model_name': '"""gpt-3.5-turbo"""', 'openai_api_key': 'api_key'}), "(temperature=0, model_name='gpt-3.5-turbo', openai_api_key=api_key)\n", (2233, 2300), False, 'from langchain.llms import OpenAIChat, OpenAI\n'), ((6945, 7065), 'langchain.llms.OpenAI', 'OpenAI', ([], {'temperature': 'temprature', 'model_name': '"""gpt-3.5-turbo"""', 'openai_api_key': 'api_key', 'prefix_messages': 'prefix_messages'}), "(temperature=temprature, model_name='gpt-3.5-turbo', openai_api_key=\n api_key, prefix_messages=prefix_messages)\n", (6951, 7065), False, 'from langchain.llms import OpenAIChat, OpenAI\n'), ((7211, 7232), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (7230, 7232), False, 'import traceback\n'), ((7728, 7740), 'llama_index.JSONReader', 'JSONReader', ([], {}), '()\n', (7738, 7740), False, 'from llama_index import Document, LLMPredictor, PromptHelper, QuestionAnswerPrompt, JSONReader\n'), ((817, 846), 'os.path.exists', 'os.path.exists', (['new_file_path'], {}), '(new_file_path)\n', (831, 846), False, 'import os\n'), ((1295, 1326), 'llama_index.download_loader', 'download_loader', (['"""CJKPDFReader"""'], {}), "('CJKPDFReader')\n", (1310, 1326), False, 'from llama_index import GPTSimpleVectorIndex, SimpleDirectoryReader, download_loader\n'), ((1222, 1249), 'os.path.splitext', 'os.path.splitext', (['file.name'], {}), '(file.name)\n', (1238, 1249), False, 'import os\n'), ((1518, 1547), 
'llama_index.download_loader', 'download_loader', (['"""DocxReader"""'], {}), "('DocxReader')\n", (1533, 1547), False, 'from llama_index import GPTSimpleVectorIndex, SimpleDirectoryReader, download_loader\n'), ((4456, 4494), 'googlesearch.search', 'google_search', (['keywords'], {'num_results': '(5)'}), '(keywords, num_results=5)\n', (4469, 4494), True, 'from googlesearch import search as google_search\n'), ((4696, 4733), 'baidusearch.baidusearch.search', 'baidu_search', (['keywords'], {'num_results': '(5)'}), '(keywords, num_results=5)\n', (4708, 4733), True, 'from baidusearch.baidusearch import search as baidu_search\n'), ((729, 756), 'os.path.splitext', 'os.path.splitext', (['file_path'], {}), '(file_path)\n', (745, 756), False, 'import os\n'), ((765, 792), 'os.path.splitext', 'os.path.splitext', (['file_path'], {}), '(file_path)\n', (781, 792), False, 'import os\n'), ((1446, 1473), 'os.path.splitext', 'os.path.splitext', (['file.name'], {}), '(file.name)\n', (1462, 1473), False, 'import os\n'), ((1737, 1766), 'llama_index.download_loader', 'download_loader', (['"""EpubReader"""'], {}), "('EpubReader')\n", (1752, 1766), False, 'from llama_index import GPTSimpleVectorIndex, SimpleDirectoryReader, download_loader\n'), ((1665, 1692), 'os.path.splitext', 'os.path.splitext', (['file.name'], {}), '(file.name)\n', (1681, 1692), False, 'import os\n'), ((2026, 2037), 'llama_index.Document', 'Document', (['k'], {}), '(k)\n', (2034, 2037), False, 'from llama_index import Document, LLMPredictor, PromptHelper, QuestionAnswerPrompt, JSONReader\n')] |
import json
from langchain.prompts import PromptTemplate
import tiktoken
from llama_index.callbacks import CallbackManager, TokenCountingHandler
from llama_index.node_parser import SimpleNodeParser
from llama_index.vector_stores import WeaviateVectorStore
from llama_index import (
VectorStoreIndex, SimpleDirectoryReader,
StorageContext, ServiceContext,
load_index_from_storage
)
import weaviate
import streamlit as st
from app_state import (state, init_app_state, _set_state_cb)
init_app_state() # ensure all state variables are initialized
from globals import (
VECTOR_STORE, OPENAI_MODELS_COMPLETIONS,
DEFAULT_MODEL_CONFIG, LANG_MODEL_PRICING
)
from common import scrape_articles
# DOCS CHAT PAGE ----------------------------------------------------------------
wc = None
# WEAVIATE CLOUD STORE
if VECTOR_STORE == 'Weaviate':
auth_config = weaviate.AuthApiKey(api_key=state.weaviate_api_key)
wc = weaviate.Client(
url=state.WEAVIATE_URL,
auth_client_secret=auth_config,
additional_headers={
"X-OpenAI-Api-Key": state.openai_api_key,
}
)
@st.cache_data(ttl=60*60, show_spinner=False)
def get_llm_doc_query_response(
query_prompt, model_name: str = DEFAULT_MODEL_CONFIG['completions_model'],
_service_context=ServiceContext.from_defaults()
):
# load index
# LOCAL STORE
if VECTOR_STORE == 'Local':
# rebuild storage context
storage_context = StorageContext.from_defaults(persist_dir='./storage')
index = load_index_from_storage(storage_context)
# WEAVIATE CLOUD STORE
elif VECTOR_STORE == 'Weaviate':
vector_store = WeaviateVectorStore(weaviate_client = wc, index_name="Documents", text_key="content")
# set up the index
index = VectorStoreIndex.from_vector_store(vector_store=vector_store, service_context=_service_context)
else:
raise ValueError(f'Unknown vector store {VECTOR_STORE}')
# get query engine over the index
query_engine = index.as_query_engine()
# query the index
response = query_engine.query(query_prompt)
response = response.response.replace('•', '*')
return response
def main(title, user_input_confirmed=False):
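    # Descriptive note (added): counts tokens for cost estimates, optionally
    # (re)indexes the ./docs folder into the configured vector store (local or
    # Weaviate), then answers the user input with an index-backed query engine
    # while keeping a conversation history.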
# Count token usage for cost estimation
token_counter = TokenCountingHandler(
tokenizer=tiktoken.encoding_for_model(state.completions_model).encode,
verbose=False # set to true to see usage printed to the console
)
callback_manager = CallbackManager([token_counter])
service_context = ServiceContext.from_defaults(callback_manager=callback_manager)
def _index_documents():
# load the documents
documents = SimpleDirectoryReader('docs').load_data()
# LOCAL STORE
# NOTE: Disallow if cloud deployment (temporary fix for public demo and/or if you
# don't have required file permissions or disk space)
if not json.loads(st.secrets['IS_CLOUD_DEPLOYMENT']) and VECTOR_STORE == 'Local':
# construct an index over these documents... saved in memory
index = VectorStoreIndex.from_documents(documents, show_progress=True, service_context=service_context)
# save index on disk
index.storage_context.persist(persist_dir='./storage')
# WEAVIATE CLOUD STORE
elif VECTOR_STORE == 'Weaviate':
wc.schema.delete_class("Documents")
class_obj = {
"class": "Documents",
"vectorizer": "text2vec-openai",
"moduleConfig": {
"text2vec-openai": {},
"generative-openai": {}
}
}
wc.schema.create_class(class_obj)
# chunk up the documents into nodes
parser = SimpleNodeParser.from_defaults(chunk_size=1024, chunk_overlap=20)
nodes = parser.get_nodes_from_documents(documents, show_progress=True)
# construct vector store
vector_store = WeaviateVectorStore(weaviate_client=wc, index_name="Documents", text_key="content")
# setting up the storage for the embeddings
storage_context = StorageContext.from_defaults(vector_store = vector_store)
# set up the index
index = VectorStoreIndex(nodes, storage_context=storage_context, show_progress=True, service_context=service_context)
else:
raise ValueError(f'Unknown vector store {VECTOR_STORE}')
print('---- Document Q&A ----', '\n',
'Indexing Embedding Tokens: ', token_counter.total_embedding_token_count, '\n')
with st.sidebar:
st.markdown(f'#### {title} Settings')
st.selectbox(
'OpenAI model', options=OPENAI_MODELS_COMPLETIONS,
on_change=_set_state_cb, kwargs={'completions_model': 'selectbox_docs_completions_model_name'},
index=OPENAI_MODELS_COMPLETIONS.index(state.completions_model),
help='Allowed models. Accuracy, speed, token consumption and costs will vary.',
key='selectbox_docs_completions_model_name'
)
include_history = st.checkbox('Include history in prompts', value=False)
if st.button('Clear history'):
state.questions = []
state.past = []
# NOTE: Hide indexing button if cloud deployment (temporary fix for public demo)
if not json.loads(st.secrets['IS_CLOUD_DEPLOYMENT']) and st.button('Index documents'):
with st.spinner("Indexing..."):
_index_documents()
# GPT completion models can not handle web sites, so we scrape the URL in the user input
user_input = state.user_input
if user_input.strip().startswith('http'):
scraped_texts = scrape_articles([user_input])['text']
user_input = scraped_texts[0] if scraped_texts else user_input
user_input = user_input.replace('\n', ' ').replace('\r', '') if user_input else user_input
if include_history:
context = '\n\n'.join([f'| Question: "{q}" | Answer: "{a}" |' for q, a in zip(state.questions, state.past)])
refinement = \
'Finally, return results in markdown text, include bullet point format where appropriate. ' + \
'Add additional web links at the end of the response if this is useful.'
prompt_template = "Given this context ### {context} ###. Answer or summarize this: ### {doc_query} ###. {refinement}"
prompt = PromptTemplate(input_variables=['context', 'doc_query', 'refinement'], template=prompt_template)
query_prompt = prompt.format(context=context, doc_query=user_input, refinement=refinement)
else:
refinement = \
'Return results in markdown text, include bullet point format where appropriate. ' + \
'Add additional web links at the end of the response if this is useful.'
prompt_template = "Answer or summarize this: ### {doc_query} ###. {refinement}"
prompt = PromptTemplate(input_variables=['doc_query', 'refinement'], template=prompt_template)
query_prompt = prompt.format(doc_query=user_input, refinement=refinement)
if user_input_confirmed and state.user_input:
with st.spinner("Generating query answer..."):
try:
response = get_llm_doc_query_response(query_prompt, model_name=state.completions_model, _service_context=service_context)
print('---- Document Q&A ----', '\n',
'Embedding Tokens: ', token_counter.total_embedding_token_count, '\n',
'LLM Prompt Tokens: ', token_counter.prompt_llm_token_count, '\n',
'LLM Completion Tokens: ', token_counter.completion_llm_token_count, '\n',
'Total LLM Token Count: ', token_counter.total_llm_token_count)
except Exception as ex:
st.warning(f'Index does not exist. Please index some documents.')
st.error(str(ex))
return
if state.user_input:
st.subheader('🙋🏽 Answer')
with st.spinner("Generating query answer..."):
try:
# This will use cached response!
response = get_llm_doc_query_response(query_prompt, model_name=state.completions_model, _service_context=service_context)
except Exception as ex:
st.warning(f'Index does not exist. Please index some documents.')
st.error(str(ex))
return
if state.user_input not in state.questions:
state.questions.append(state.user_input)
state.generated.append((state.user_input, response))
state.past.append(response)
st.markdown(response)
with st.expander('View conversation history', expanded=False):
st.markdown('\n\n'.join([f'---\n**Question**\n\n{q}\n\n**Answer**\n\n{a}' for q, a in zip(state.questions, state.past)]))
estimated_cost = ((token_counter.prompt_llm_token_count / 1000.0) * LANG_MODEL_PRICING[state.completions_model]['input']) + \
((token_counter.completion_llm_token_count / 1000.0) * LANG_MODEL_PRICING[state.completions_model]['output'])
print('Document Q&A Estimated Cost: $', estimated_cost)
state.estimated_cost_doc = estimated_cost
state.cumulative_cost += estimated_cost
return response
| [
"llama_index.VectorStoreIndex.from_documents",
"llama_index.SimpleDirectoryReader",
"llama_index.VectorStoreIndex.from_vector_store",
"llama_index.VectorStoreIndex",
"llama_index.ServiceContext.from_defaults",
"llama_index.StorageContext.from_defaults",
"llama_index.node_parser.SimpleNodeParser.from_defaults",
"llama_index.vector_stores.WeaviateVectorStore",
"llama_index.load_index_from_storage",
"llama_index.callbacks.CallbackManager"
] | [((499, 515), 'app_state.init_app_state', 'init_app_state', ([], {}), '()\n', (513, 515), False, 'from app_state import state, init_app_state, _set_state_cb\n'), ((1129, 1175), 'streamlit.cache_data', 'st.cache_data', ([], {'ttl': '(60 * 60)', 'show_spinner': '(False)'}), '(ttl=60 * 60, show_spinner=False)\n', (1142, 1175), True, 'import streamlit as st\n'), ((878, 929), 'weaviate.AuthApiKey', 'weaviate.AuthApiKey', ([], {'api_key': 'state.weaviate_api_key'}), '(api_key=state.weaviate_api_key)\n', (897, 929), False, 'import weaviate\n'), ((939, 1077), 'weaviate.Client', 'weaviate.Client', ([], {'url': 'state.WEAVIATE_URL', 'auth_client_secret': 'auth_config', 'additional_headers': "{'X-OpenAI-Api-Key': state.openai_api_key}"}), "(url=state.WEAVIATE_URL, auth_client_secret=auth_config,\n additional_headers={'X-OpenAI-Api-Key': state.openai_api_key})\n", (954, 1077), False, 'import weaviate\n'), ((1307, 1337), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {}), '()\n', (1335, 1337), False, 'from llama_index import VectorStoreIndex, SimpleDirectoryReader, StorageContext, ServiceContext, load_index_from_storage\n'), ((2504, 2536), 'llama_index.callbacks.CallbackManager', 'CallbackManager', (['[token_counter]'], {}), '([token_counter])\n', (2519, 2536), False, 'from llama_index.callbacks import CallbackManager, TokenCountingHandler\n'), ((2559, 2622), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'callback_manager': 'callback_manager'}), '(callback_manager=callback_manager)\n', (2587, 2622), False, 'from llama_index import VectorStoreIndex, SimpleDirectoryReader, StorageContext, ServiceContext, load_index_from_storage\n'), ((1468, 1521), 'llama_index.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {'persist_dir': '"""./storage"""'}), "(persist_dir='./storage')\n", (1496, 1521), False, 'from llama_index import VectorStoreIndex, SimpleDirectoryReader, StorageContext, ServiceContext, load_index_from_storage\n'), ((1538, 1578), 'llama_index.load_index_from_storage', 'load_index_from_storage', (['storage_context'], {}), '(storage_context)\n', (1561, 1578), False, 'from llama_index import VectorStoreIndex, SimpleDirectoryReader, StorageContext, ServiceContext, load_index_from_storage\n'), ((4664, 4701), 'streamlit.markdown', 'st.markdown', (['f"""#### {title} Settings"""'], {}), "(f'#### {title} Settings')\n", (4675, 4701), True, 'import streamlit as st\n'), ((5155, 5209), 'streamlit.checkbox', 'st.checkbox', (['"""Include history in prompts"""'], {'value': '(False)'}), "('Include history in prompts', value=False)\n", (5166, 5209), True, 'import streamlit as st\n'), ((5221, 5247), 'streamlit.button', 'st.button', (['"""Clear history"""'], {}), "('Clear history')\n", (5230, 5247), True, 'import streamlit as st\n'), ((6480, 6580), 'langchain.prompts.PromptTemplate', 'PromptTemplate', ([], {'input_variables': "['context', 'doc_query', 'refinement']", 'template': 'prompt_template'}), "(input_variables=['context', 'doc_query', 'refinement'],\n template=prompt_template)\n", (6494, 6580), False, 'from langchain.prompts import PromptTemplate\n'), ((6998, 7088), 'langchain.prompts.PromptTemplate', 'PromptTemplate', ([], {'input_variables': "['doc_query', 'refinement']", 'template': 'prompt_template'}), "(input_variables=['doc_query', 'refinement'], template=\n prompt_template)\n", (7012, 7088), False, 'from langchain.prompts import PromptTemplate\n'), ((8056, 8081), 'streamlit.subheader', 'st.subheader', (['"""🙋🏽 
Answer"""'], {}), "('🙋🏽 Answer')\n", (8068, 8081), True, 'import streamlit as st\n'), ((8748, 8769), 'streamlit.markdown', 'st.markdown', (['response'], {}), '(response)\n', (8759, 8769), True, 'import streamlit as st\n'), ((1667, 1755), 'llama_index.vector_stores.WeaviateVectorStore', 'WeaviateVectorStore', ([], {'weaviate_client': 'wc', 'index_name': '"""Documents"""', 'text_key': '"""content"""'}), "(weaviate_client=wc, index_name='Documents', text_key=\n 'content')\n", (1686, 1755), False, 'from llama_index.vector_stores import WeaviateVectorStore\n'), ((1796, 1895), 'llama_index.VectorStoreIndex.from_vector_store', 'VectorStoreIndex.from_vector_store', ([], {'vector_store': 'vector_store', 'service_context': '_service_context'}), '(vector_store=vector_store,\n service_context=_service_context)\n', (1830, 1895), False, 'from llama_index import VectorStoreIndex, SimpleDirectoryReader, StorageContext, ServiceContext, load_index_from_storage\n'), ((3107, 3206), 'llama_index.VectorStoreIndex.from_documents', 'VectorStoreIndex.from_documents', (['documents'], {'show_progress': '(True)', 'service_context': 'service_context'}), '(documents, show_progress=True,\n service_context=service_context)\n', (3138, 3206), False, 'from llama_index import VectorStoreIndex, SimpleDirectoryReader, StorageContext, ServiceContext, load_index_from_storage\n'), ((5464, 5492), 'streamlit.button', 'st.button', (['"""Index documents"""'], {}), "('Index documents')\n", (5473, 5492), True, 'import streamlit as st\n'), ((5771, 5800), 'common.scrape_articles', 'scrape_articles', (['[user_input]'], {}), '([user_input])\n', (5786, 5800), False, 'from common import scrape_articles\n'), ((7230, 7270), 'streamlit.spinner', 'st.spinner', (['"""Generating query answer..."""'], {}), "('Generating query answer...')\n", (7240, 7270), True, 'import streamlit as st\n'), ((8095, 8135), 'streamlit.spinner', 'st.spinner', (['"""Generating query answer..."""'], {}), "('Generating query answer...')\n", (8105, 8135), True, 'import streamlit as st\n'), ((8593, 8633), 'app_state.state.questions.append', 'state.questions.append', (['state.user_input'], {}), '(state.user_input)\n', (8615, 8633), False, 'from app_state import state, init_app_state, _set_state_cb\n'), ((8646, 8698), 'app_state.state.generated.append', 'state.generated.append', (['(state.user_input, response)'], {}), '((state.user_input, response))\n', (8668, 8698), False, 'from app_state import state, init_app_state, _set_state_cb\n'), ((8711, 8738), 'app_state.state.past.append', 'state.past.append', (['response'], {}), '(response)\n', (8728, 8738), False, 'from app_state import state, init_app_state, _set_state_cb\n'), ((8784, 8840), 'streamlit.expander', 'st.expander', (['"""View conversation history"""'], {'expanded': '(False)'}), "('View conversation history', expanded=False)\n", (8795, 8840), True, 'import streamlit as st\n'), ((2341, 2393), 'tiktoken.encoding_for_model', 'tiktoken.encoding_for_model', (['state.completions_model'], {}), '(state.completions_model)\n', (2368, 2393), False, 'import tiktoken\n'), ((2706, 2735), 'llama_index.SimpleDirectoryReader', 'SimpleDirectoryReader', (['"""docs"""'], {}), "('docs')\n", (2727, 2735), False, 'from llama_index import VectorStoreIndex, SimpleDirectoryReader, StorageContext, ServiceContext, load_index_from_storage\n'), ((2939, 2984), 'json.loads', 'json.loads', (["st.secrets['IS_CLOUD_DEPLOYMENT']"], {}), "(st.secrets['IS_CLOUD_DEPLOYMENT'])\n", (2949, 2984), False, 'import json\n'), ((3806, 3871), 
'llama_index.node_parser.SimpleNodeParser.from_defaults', 'SimpleNodeParser.from_defaults', ([], {'chunk_size': '(1024)', 'chunk_overlap': '(20)'}), '(chunk_size=1024, chunk_overlap=20)\n', (3836, 3871), False, 'from llama_index.node_parser import SimpleNodeParser\n'), ((4019, 4107), 'llama_index.vector_stores.WeaviateVectorStore', 'WeaviateVectorStore', ([], {'weaviate_client': 'wc', 'index_name': '"""Documents"""', 'text_key': '"""content"""'}), "(weaviate_client=wc, index_name='Documents', text_key=\n 'content')\n", (4038, 4107), False, 'from llama_index.vector_stores import WeaviateVectorStore\n'), ((4189, 4244), 'llama_index.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {'vector_store': 'vector_store'}), '(vector_store=vector_store)\n', (4217, 4244), False, 'from llama_index import VectorStoreIndex, SimpleDirectoryReader, StorageContext, ServiceContext, load_index_from_storage\n'), ((4298, 4411), 'llama_index.VectorStoreIndex', 'VectorStoreIndex', (['nodes'], {'storage_context': 'storage_context', 'show_progress': '(True)', 'service_context': 'service_context'}), '(nodes, storage_context=storage_context, show_progress=True,\n service_context=service_context)\n', (4314, 4411), False, 'from llama_index import VectorStoreIndex, SimpleDirectoryReader, StorageContext, ServiceContext, load_index_from_storage\n'), ((4913, 4969), 'globals.OPENAI_MODELS_COMPLETIONS.index', 'OPENAI_MODELS_COMPLETIONS.index', (['state.completions_model'], {}), '(state.completions_model)\n', (4944, 4969), False, 'from globals import VECTOR_STORE, OPENAI_MODELS_COMPLETIONS, DEFAULT_MODEL_CONFIG, LANG_MODEL_PRICING\n'), ((5414, 5459), 'json.loads', 'json.loads', (["st.secrets['IS_CLOUD_DEPLOYMENT']"], {}), "(st.secrets['IS_CLOUD_DEPLOYMENT'])\n", (5424, 5459), False, 'import json\n'), ((5511, 5536), 'streamlit.spinner', 'st.spinner', (['"""Indexing..."""'], {}), "('Indexing...')\n", (5521, 5536), True, 'import streamlit as st\n'), ((7899, 7964), 'streamlit.warning', 'st.warning', (['f"""Index does not exist. Please index some documents."""'], {}), "(f'Index does not exist. Please index some documents.')\n", (7909, 7964), True, 'import streamlit as st\n'), ((8393, 8458), 'streamlit.warning', 'st.warning', (['f"""Index does not exist. Please index some documents."""'], {}), "(f'Index does not exist. Please index some documents.')\n", (8403, 8458), True, 'import streamlit as st\n')] |
import logging
from typing import List, Optional
import tiktoken
from llama_index.indices.base import BaseIndex
from transformers import AutoTokenizer
from tiktoken import Encoding
from llama_index import (
VectorStoreIndex,
SimpleDirectoryReader,
ServiceContext,
StorageContext,
)
from llama_index.langchain_helpers.text_splitter import TokenTextSplitter
from llama_index.node_parser import SimpleNodeParser
from llama_index import load_index_from_storage
import constants
from utils import check_index_files
class ChunkingTokenizer:
"""Tokenizer for chunking document data for creation of embeddings"""
def __init__(self, model_name: str):
self.tokenizer = AutoTokenizer.from_pretrained(model_name)
def __call__(self, text: str) -> List[int]:
return self.tokenizer.encode(text)
class Config:
"""Configurations required for initializing the agent"""
_llm_tokenizer: Optional[Encoding] = None
def __init__(
self,
encoding_name: str,
embedding_model_name: str,
):
self._chunking_tokenizer = None
self.encoding_name = encoding_name
self.embedding_model_name = embedding_model_name
def initialize(self) -> VectorStoreIndex:
# Initialize tokenizer for text chunking
self._chunking_tokenizer = ChunkingTokenizer(self.embedding_model_name)
# Initialize text splitter
self._text_splitter = TokenTextSplitter(
chunk_size=constants.MAX_CHUNK_SIZE,
chunk_overlap=constants.CHUNK_OVERLAP,
tokenizer=self._chunking_tokenizer,
separator="\n\n",
backup_separators=["\n", " "],
)
# Initialize OpenAI LLM tokenizer
self.llm_tokenizer = tiktoken.get_encoding(self.encoding_name)
# Initialize vector index
return self._init_index()
def _init_index(self) -> VectorStoreIndex:
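        # Descriptive note (added): reuse the index persisted under
        # constants.PERSIST_DIR when its files are present; otherwise build a fresh
        # index from constants.DOCUMENT_DATA_DIR and persist it.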
node_parser = SimpleNodeParser(text_splitter=self._text_splitter)
service_context = ServiceContext.from_defaults(
embed_model=f"local:{self.embedding_model_name}", node_parser=node_parser
)
index_id = constants.COMPANY_NAME
if check_index_files(constants.PERSIST_DIR):
storage_context = StorageContext.from_defaults(
persist_dir=constants.PERSIST_DIR
)
index = load_index_from_storage(
storage_context=storage_context,
service_context=service_context,
index_id=index_id,
)
return index
# If index does not exist, initialize index
logging.info('message="initialize index started"')
# Create index
documents = SimpleDirectoryReader(constants.DOCUMENT_DATA_DIR).load_data()
index = VectorStoreIndex.from_documents(
documents, service_context=service_context
)
index.set_index_id(index_id)
# Save index to disk
index.storage_context.persist(f"{constants.PERSIST_DIR}")
logging.info('message="initialize index completed"')
return index
| [
"llama_index.VectorStoreIndex.from_documents",
"llama_index.SimpleDirectoryReader",
"llama_index.langchain_helpers.text_splitter.TokenTextSplitter",
"llama_index.ServiceContext.from_defaults",
"llama_index.StorageContext.from_defaults",
"llama_index.node_parser.SimpleNodeParser",
"llama_index.load_index_from_storage"
] | [((697, 738), 'transformers.AutoTokenizer.from_pretrained', 'AutoTokenizer.from_pretrained', (['model_name'], {}), '(model_name)\n', (726, 738), False, 'from transformers import AutoTokenizer\n'), ((1440, 1628), 'llama_index.langchain_helpers.text_splitter.TokenTextSplitter', 'TokenTextSplitter', ([], {'chunk_size': 'constants.MAX_CHUNK_SIZE', 'chunk_overlap': 'constants.CHUNK_OVERLAP', 'tokenizer': 'self._chunking_tokenizer', 'separator': '"""\n\n"""', 'backup_separators': "['\\n', ' ']"}), "(chunk_size=constants.MAX_CHUNK_SIZE, chunk_overlap=\n constants.CHUNK_OVERLAP, tokenizer=self._chunking_tokenizer, separator=\n '\\n\\n', backup_separators=['\\n', ' '])\n", (1457, 1628), False, 'from llama_index.langchain_helpers.text_splitter import TokenTextSplitter\n'), ((1762, 1803), 'tiktoken.get_encoding', 'tiktoken.get_encoding', (['self.encoding_name'], {}), '(self.encoding_name)\n', (1783, 1803), False, 'import tiktoken\n'), ((1943, 1994), 'llama_index.node_parser.SimpleNodeParser', 'SimpleNodeParser', ([], {'text_splitter': 'self._text_splitter'}), '(text_splitter=self._text_splitter)\n', (1959, 1994), False, 'from llama_index.node_parser import SimpleNodeParser\n'), ((2021, 2129), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'embed_model': 'f"""local:{self.embedding_model_name}"""', 'node_parser': 'node_parser'}), "(embed_model=\n f'local:{self.embedding_model_name}', node_parser=node_parser)\n", (2049, 2129), False, 'from llama_index import VectorStoreIndex, SimpleDirectoryReader, ServiceContext, StorageContext\n'), ((2201, 2241), 'utils.check_index_files', 'check_index_files', (['constants.PERSIST_DIR'], {}), '(constants.PERSIST_DIR)\n', (2218, 2241), False, 'from utils import check_index_files\n'), ((2646, 2696), 'logging.info', 'logging.info', (['"""message="initialize index started\\""""'], {}), '(\'message="initialize index started"\')\n', (2658, 2696), False, 'import logging\n'), ((2819, 2894), 'llama_index.VectorStoreIndex.from_documents', 'VectorStoreIndex.from_documents', (['documents'], {'service_context': 'service_context'}), '(documents, service_context=service_context)\n', (2850, 2894), False, 'from llama_index import VectorStoreIndex, SimpleDirectoryReader, ServiceContext, StorageContext\n'), ((3057, 3109), 'logging.info', 'logging.info', (['"""message="initialize index completed\\""""'], {}), '(\'message="initialize index completed"\')\n', (3069, 3109), False, 'import logging\n'), ((2273, 2336), 'llama_index.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {'persist_dir': 'constants.PERSIST_DIR'}), '(persist_dir=constants.PERSIST_DIR)\n', (2301, 2336), False, 'from llama_index import VectorStoreIndex, SimpleDirectoryReader, ServiceContext, StorageContext\n'), ((2387, 2500), 'llama_index.load_index_from_storage', 'load_index_from_storage', ([], {'storage_context': 'storage_context', 'service_context': 'service_context', 'index_id': 'index_id'}), '(storage_context=storage_context, service_context=\n service_context, index_id=index_id)\n', (2410, 2500), False, 'from llama_index import load_index_from_storage\n'), ((2740, 2790), 'llama_index.SimpleDirectoryReader', 'SimpleDirectoryReader', (['constants.DOCUMENT_DATA_DIR'], {}), '(constants.DOCUMENT_DATA_DIR)\n', (2761, 2790), False, 'from llama_index import VectorStoreIndex, SimpleDirectoryReader, ServiceContext, StorageContext\n')] |
from __future__ import annotations
from typing import TYPE_CHECKING, Any, Dict, List, Tuple
from uuid import uuid4
from llama_index.legacy.llm_predictor.vellum.types import (
VellumCompiledPrompt,
VellumRegisteredPrompt,
)
from llama_index.legacy.llm_predictor.vellum.utils import convert_to_kebab_case
from llama_index.legacy.prompts import BasePromptTemplate
from llama_index.legacy.prompts.base import PromptTemplate
if TYPE_CHECKING:
import vellum
class VellumPromptRegistry:
"""Registers and retrieves prompts with Vellum.
LlamaIndex Prompts can be registered within Vellum, at which point Vellum becomes
the source of truth for the prompt. From there, Vellum can be used for prompt/model
experimentation, request monitoring, and more.
"""
def __init__(self, vellum_api_key: str) -> None:
import_err_msg = (
"`vellum` package not found, please run `pip install vellum-ai`"
)
try:
from vellum.client import Vellum
except ImportError:
raise ImportError(import_err_msg)
self._vellum_client = Vellum(api_key=vellum_api_key)
def from_prompt(self, initial_prompt: BasePromptTemplate) -> VellumRegisteredPrompt:
"""Accepts a LlamaIndex prompt and retrieves a corresponding registered prompt
from Vellum.
If the LlamaIndex prompt hasn't yet been registered, it'll be registered
automatically, after which point Vellum becomes the source-of-truth for the
prompt's definition.
In this way, the LlamaIndex prompt is treated as the initial value for the newly
registered prompt in Vellum.
You can reference a previously registered prompt by providing either
`vellum_deployment_id` or `vellum_deployment_name` as key/value pairs within
`BasePromptTemplate.metadata`.
"""
from vellum.core import ApiError
deployment_id = initial_prompt.metadata.get("vellum_deployment_id")
deployment_name = initial_prompt.metadata.get(
"vellum_deployment_name"
) or self._generate_default_name(initial_prompt)
registered_prompt: VellumRegisteredPrompt
try:
deployment = self._vellum_client.deployments.retrieve(
deployment_id or deployment_name
)
except ApiError as e:
if e.status_code == 404:
registered_prompt = self._register_prompt(initial_prompt)
else:
raise
else:
registered_prompt = self._get_registered_prompt(deployment)
return registered_prompt
def get_compiled_prompt(
self, registered_prompt: VellumRegisteredPrompt, input_values: Dict[str, Any]
) -> VellumCompiledPrompt:
"""Retrieves the fully-compiled prompt from Vellum, after all variable
substitutions, templating, etc.
"""
result = self._vellum_client.model_versions.model_version_compile_prompt(
registered_prompt.model_version_id, input_values=input_values
)
return VellumCompiledPrompt(
text=result.prompt.text, num_tokens=result.prompt.num_tokens
)
def _get_registered_prompt(
self, deployment: vellum.DeploymentRead
) -> VellumRegisteredPrompt:
"""Retrieves a prompt from Vellum, keying off of the deployment's id/name."""
# Assume that the deployment backing a registered prompt will always have a
# single model version. Note that this may not be true in the future once
# deployment-level A/B testing is supported and someone configures an A/B test.
model_version_id = deployment.active_model_version_ids[0]
model_version = self._vellum_client.model_versions.retrieve(model_version_id)
sandbox_snapshot_info = model_version.build_config.sandbox_snapshot
sandbox_snapshot_id = (
sandbox_snapshot_info.id if sandbox_snapshot_info else None
)
prompt_id = sandbox_snapshot_info.prompt_id if sandbox_snapshot_info else None
sandbox_id = sandbox_snapshot_info.sandbox_id if sandbox_snapshot_info else None
return VellumRegisteredPrompt(
deployment_id=deployment.id,
deployment_name=deployment.name,
model_version_id=model_version.id,
sandbox_id=sandbox_id,
sandbox_snapshot_id=sandbox_snapshot_id,
prompt_id=prompt_id,
)
def _register_prompt(self, prompt: BasePromptTemplate) -> VellumRegisteredPrompt:
"""Registers a prompt with Vellum.
By registering a prompt, Vellum will:
1) Create a Sandbox for the prompt so that you can experiment with the
prompt, LLM provider, model, and parameters via Vellum's UI.
2) Deployment for the prompt so that you can monitor requests and
update the prompt, LLM provider, model, and parameters via Vellum's UI
without requiring code changes.
"""
# Label represents a human-friendly name that'll be used for all created
# entities within Vellum. If not provided, a default will be generated.
label = prompt.metadata.get(
"vellum_deployment_label"
) or self._generate_default_label(prompt)
# Name represents a kebab-cased unique identifier that'll be used for all
# created entities within Vellum. If not provided, a default will be generated.
name = prompt.metadata.get(
"vellum_deployment_name"
) or self._generate_default_name(prompt)
# Note: For now, the initial provider, model, and parameters used to register
# the prompt are hard-coded. You can then update any of these from within
# Vellum's UI. As a future improvement, we could allow these to be specified
# upfront.
provider, model, params = self._get_default_llm_meta()
prompt_info = self._construct_prompt_info(prompt, for_chat_model=True)
resp = self._vellum_client.registered_prompts.register_prompt(
label=label,
name=name,
prompt=prompt_info,
provider=provider,
model=model,
parameters=params,
meta={
"source": "llamaindex",
"prompt_type": prompt.metadata["prompt_type"],
},
)
return VellumRegisteredPrompt(
deployment_id=resp.deployment.id,
deployment_name=resp.deployment.name,
model_version_id=resp.model_version.id,
sandbox_id=resp.sandbox.id,
sandbox_snapshot_id=resp.sandbox_snapshot.id,
prompt_id=resp.prompt.id,
)
def _generate_default_label(self, prompt: BasePromptTemplate) -> str:
prompt_type = prompt.metadata["prompt_type"]
return f"LlamaIndex Demo: {prompt_type}'"
def _generate_default_name(self, prompt: BasePromptTemplate) -> str:
default_label = self._generate_default_label(prompt)
return convert_to_kebab_case(default_label)
def _construct_prompt_info(
self, prompt: BasePromptTemplate, for_chat_model: bool = True
) -> vellum.RegisterPromptPromptInfoRequest:
"""Converts a LlamaIndex prompt into Vellum's prompt representation."""
import vellum
assert isinstance(prompt, PromptTemplate)
prompt_template = prompt.template
for input_variable in prompt.template_vars:
prompt_template = prompt_template.replace(
input_variable, f"{{ {input_variable} }}"
)
block: vellum.PromptTemplateBlockRequest
jinja_block = vellum.PromptTemplateBlockRequest(
id=str(uuid4()),
block_type=vellum.BlockTypeEnum.JINJA,
properties=vellum.PromptTemplateBlockPropertiesRequest(
template=self._prepare_prompt_jinja_template(
prompt.template,
prompt.template_vars,
),
),
)
if for_chat_model:
block = vellum.PromptTemplateBlockRequest(
id=str(uuid4()),
block_type=vellum.BlockTypeEnum.CHAT_MESSAGE,
properties=vellum.PromptTemplateBlockPropertiesRequest(
chat_role=vellum.ChatMessageRole.SYSTEM,
blocks=[jinja_block],
),
)
else:
block = jinja_block
return vellum.RegisterPromptPromptInfoRequest(
prompt_syntax_version=2,
prompt_block_data=vellum.PromptTemplateBlockDataRequest(
version=1,
blocks=[block],
),
input_variables=[{"key": input_var} for input_var in prompt.template_vars],
)
def _prepare_prompt_jinja_template(
self, original_template: str, input_variables: List[str]
) -> str:
"""Converts a prompt template into a Jinja template."""
prompt_template = original_template
for input_variable in input_variables:
prompt_template = prompt_template.replace(
("{" + input_variable + "}"), ("{{ " + input_variable + " }}")
)
return prompt_template
def _get_default_llm_meta(
self,
) -> Tuple[vellum.ProviderEnum, str, vellum.RegisterPromptModelParametersRequest]:
import vellum
return (
vellum.ProviderEnum.OPENAI,
"gpt-3.5-turbo",
vellum.RegisterPromptModelParametersRequest(
temperature=0.0,
max_tokens=256,
stop=[],
top_p=1.0,
top_k=0.0,
frequency_penalty=0.6,
presence_penalty=0.7,
logit_bias=None,
),
)
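# --- Editor's note: minimal usage sketch (assumed; `registry` stands for an instance of the
# prompt-registry class defined above, and `qa_prompt` for any llama_index BasePromptTemplate) ---
#
#   registered = registry._register_prompt(qa_prompt)
#   compiled = registry.get_compiled_prompt(
#       registered, input_values={"context_str": "...", "query_str": "..."}
#   )
#   print(compiled.text, compiled.num_tokens)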
| [
"llama_index.legacy.llm_predictor.vellum.types.VellumCompiledPrompt",
"llama_index.legacy.llm_predictor.vellum.utils.convert_to_kebab_case",
"llama_index.legacy.llm_predictor.vellum.types.VellumRegisteredPrompt"
] | [((1114, 1144), 'vellum.client.Vellum', 'Vellum', ([], {'api_key': 'vellum_api_key'}), '(api_key=vellum_api_key)\n', (1120, 1144), False, 'from vellum.client import Vellum\n'), ((3100, 3187), 'llama_index.legacy.llm_predictor.vellum.types.VellumCompiledPrompt', 'VellumCompiledPrompt', ([], {'text': 'result.prompt.text', 'num_tokens': 'result.prompt.num_tokens'}), '(text=result.prompt.text, num_tokens=result.prompt.\n num_tokens)\n', (3120, 3187), False, 'from llama_index.legacy.llm_predictor.vellum.types import VellumCompiledPrompt, VellumRegisteredPrompt\n'), ((4194, 4408), 'llama_index.legacy.llm_predictor.vellum.types.VellumRegisteredPrompt', 'VellumRegisteredPrompt', ([], {'deployment_id': 'deployment.id', 'deployment_name': 'deployment.name', 'model_version_id': 'model_version.id', 'sandbox_id': 'sandbox_id', 'sandbox_snapshot_id': 'sandbox_snapshot_id', 'prompt_id': 'prompt_id'}), '(deployment_id=deployment.id, deployment_name=\n deployment.name, model_version_id=model_version.id, sandbox_id=\n sandbox_id, sandbox_snapshot_id=sandbox_snapshot_id, prompt_id=prompt_id)\n', (4216, 4408), False, 'from llama_index.legacy.llm_predictor.vellum.types import VellumCompiledPrompt, VellumRegisteredPrompt\n'), ((6422, 6670), 'llama_index.legacy.llm_predictor.vellum.types.VellumRegisteredPrompt', 'VellumRegisteredPrompt', ([], {'deployment_id': 'resp.deployment.id', 'deployment_name': 'resp.deployment.name', 'model_version_id': 'resp.model_version.id', 'sandbox_id': 'resp.sandbox.id', 'sandbox_snapshot_id': 'resp.sandbox_snapshot.id', 'prompt_id': 'resp.prompt.id'}), '(deployment_id=resp.deployment.id, deployment_name=\n resp.deployment.name, model_version_id=resp.model_version.id,\n sandbox_id=resp.sandbox.id, sandbox_snapshot_id=resp.sandbox_snapshot.\n id, prompt_id=resp.prompt.id)\n', (6444, 6670), False, 'from llama_index.legacy.llm_predictor.vellum.types import VellumCompiledPrompt, VellumRegisteredPrompt\n'), ((7068, 7104), 'llama_index.legacy.llm_predictor.vellum.utils.convert_to_kebab_case', 'convert_to_kebab_case', (['default_label'], {}), '(default_label)\n', (7089, 7104), False, 'from llama_index.legacy.llm_predictor.vellum.utils import convert_to_kebab_case\n'), ((9545, 9723), 'vellum.RegisterPromptModelParametersRequest', 'vellum.RegisterPromptModelParametersRequest', ([], {'temperature': '(0.0)', 'max_tokens': '(256)', 'stop': '[]', 'top_p': '(1.0)', 'top_k': '(0.0)', 'frequency_penalty': '(0.6)', 'presence_penalty': '(0.7)', 'logit_bias': 'None'}), '(temperature=0.0, max_tokens=256,\n stop=[], top_p=1.0, top_k=0.0, frequency_penalty=0.6, presence_penalty=\n 0.7, logit_bias=None)\n', (9588, 9723), False, 'import vellum\n'), ((8625, 8689), 'vellum.PromptTemplateBlockDataRequest', 'vellum.PromptTemplateBlockDataRequest', ([], {'version': '(1)', 'blocks': '[block]'}), '(version=1, blocks=[block])\n', (8662, 8689), False, 'import vellum\n'), ((7757, 7764), 'uuid.uuid4', 'uuid4', ([], {}), '()\n', (7762, 7764), False, 'from uuid import uuid4\n'), ((8275, 8386), 'vellum.PromptTemplateBlockPropertiesRequest', 'vellum.PromptTemplateBlockPropertiesRequest', ([], {'chat_role': 'vellum.ChatMessageRole.SYSTEM', 'blocks': '[jinja_block]'}), '(chat_role=vellum.\n ChatMessageRole.SYSTEM, blocks=[jinja_block])\n', (8318, 8386), False, 'import vellum\n'), ((8176, 8183), 'uuid.uuid4', 'uuid4', ([], {}), '()\n', (8181, 8183), False, 'from uuid import uuid4\n')] |
# Copyright 2023 osiworx
# Licensed under the Apache License, Version 2.0 (the "License"); you
# may not use this file except in compliance with the License. You
# may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License.
import datetime
import os
from llama_index import GPTVectorStoreIndex,VectorStoreIndex, StorageContext, SimpleDirectoryReader, ServiceContext
from llama_index.vector_stores import MilvusVectorStore
from llama_index.embeddings import HuggingFaceEmbedding
import torch
from llama_index.llms import HuggingFaceLLM
from llama_index.prompts import PromptTemplate
vector_store = MilvusVectorStore(
uri = "http://localhost:19530",
port = 19530 ,
collection_name = 'llama_index_prompts_all',
dim = 384,
similarity_metric = "L2",
)
sample_files_path = r"E:\prompt_sources\lexica_split"  # raw string so the backslashes are kept literally
embed_model = HuggingFaceEmbedding(model_name="sentence-transformers/all-MiniLM-L12-v2")
storage_context = StorageContext.from_defaults(vector_store=vector_store)
service_context = ServiceContext.from_defaults(llm=None, embed_model=embed_model)
for subdir, dirs, files in os.walk(sample_files_path):
if len(files) > 0:
now = datetime.datetime.now()
print(f'{now.strftime("%H:%M:%S")} adding folder: {subdir}')
documents = SimpleDirectoryReader(subdir).load_data()
docs = []
for doc in documents:
doc.excluded_llm_metadata_keys.append("file_path")
doc.excluded_embed_metadata_keys.append("file_path")
if doc.text != '':
docs = docs + [doc]
del documents
vector_index = VectorStoreIndex.from_documents(docs, storage_context=storage_context, service_context=service_context, show_progress=True)
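# --- Editor's note: optional sanity check (assumed usage, not part of the original script) ---
# Because the service context is built with llm=None, the index supports embedding-only
# retrieval; after the loop, the most recently built index could be probed like this:
#
#   retriever = vector_index.as_retriever(similarity_top_k=5)
#   for node in retriever.retrieve("a cinematic portrait photo"):
#       print(node.score, node.node.text[:80])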
| [
"llama_index.VectorStoreIndex.from_documents",
"llama_index.SimpleDirectoryReader",
"llama_index.ServiceContext.from_defaults",
"llama_index.vector_stores.MilvusVectorStore",
"llama_index.StorageContext.from_defaults",
"llama_index.embeddings.HuggingFaceEmbedding"
] | [((946, 1086), 'llama_index.vector_stores.MilvusVectorStore', 'MilvusVectorStore', ([], {'uri': '"""http://localhost:19530"""', 'port': '(19530)', 'collection_name': '"""llama_index_prompts_all"""', 'dim': '(384)', 'similarity_metric': '"""L2"""'}), "(uri='http://localhost:19530', port=19530, collection_name\n ='llama_index_prompts_all', dim=384, similarity_metric='L2')\n", (963, 1086), False, 'from llama_index.vector_stores import MilvusVectorStore\n'), ((1188, 1262), 'llama_index.embeddings.HuggingFaceEmbedding', 'HuggingFaceEmbedding', ([], {'model_name': '"""sentence-transformers/all-MiniLM-L12-v2"""'}), "(model_name='sentence-transformers/all-MiniLM-L12-v2')\n", (1208, 1262), False, 'from llama_index.embeddings import HuggingFaceEmbedding\n'), ((1282, 1337), 'llama_index.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {'vector_store': 'vector_store'}), '(vector_store=vector_store)\n', (1310, 1337), False, 'from llama_index import GPTVectorStoreIndex, VectorStoreIndex, StorageContext, SimpleDirectoryReader, ServiceContext\n'), ((1357, 1420), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'llm': 'None', 'embed_model': 'embed_model'}), '(llm=None, embed_model=embed_model)\n', (1385, 1420), False, 'from llama_index import GPTVectorStoreIndex, VectorStoreIndex, StorageContext, SimpleDirectoryReader, ServiceContext\n'), ((1449, 1475), 'os.walk', 'os.walk', (['sample_files_path'], {}), '(sample_files_path)\n', (1456, 1475), False, 'import os\n'), ((1514, 1537), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (1535, 1537), False, 'import datetime\n'), ((1961, 2088), 'llama_index.VectorStoreIndex.from_documents', 'VectorStoreIndex.from_documents', (['docs'], {'storage_context': 'storage_context', 'service_context': 'service_context', 'show_progress': '(True)'}), '(docs, storage_context=storage_context,\n service_context=service_context, show_progress=True)\n', (1992, 2088), False, 'from llama_index import GPTVectorStoreIndex, VectorStoreIndex, StorageContext, SimpleDirectoryReader, ServiceContext\n'), ((1628, 1657), 'llama_index.SimpleDirectoryReader', 'SimpleDirectoryReader', (['subdir'], {}), '(subdir)\n', (1649, 1657), False, 'from llama_index import GPTVectorStoreIndex, VectorStoreIndex, StorageContext, SimpleDirectoryReader, ServiceContext\n')] |
import streamlit as st
import logging, sys, os
import openai
from dotenv import load_dotenv
from llama_index.agent import OpenAIAgent
from llama_index.llms import OpenAI
from llama_hub.tools.zapier.base import ZapierToolSpec
#loads dotenv lib to retrieve API keys from .env file
load_dotenv()
openai.api_key = os.getenv("OPENAI_API_KEY")
# enable INFO level logging
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))
# add a title for our UI
st.title("GitHub Archive Analysis")
# add a text area for user to enter SQL query
sql_query = st.text_area("Enter your SQL query here")
if sql_query:
# establish connection to Snowflake
conn = st.experimental_connection('snowpark')
# run query based on the SQL entered
df = conn.query(sql_query)
# write query result on UI
st.write(df)
# add a line chart to display the result visually
st.line_chart(df, x="REPO_NAME", y="SUM_STARS")
# get the most-starred repo
top_repo = df.iloc[0, :]
# construct zapier_spec by passing in Zapier API key
zapier_spec = ZapierToolSpec(api_key=os.getenv("ZAPIER_API_KEY"))
# initialize llm
llm = OpenAI(model="gpt-3.5-turbo-0613")
# initialize OpenAI agent by passing in zapier_spec and the llm
agent = OpenAIAgent.from_tools(zapier_spec.to_tool_list(), verbose=True, llm=llm)
# add instructions
agent.chat(f"Send me an email on the details of {top_repo['REPO_NAME']}.")
agent.chat(f"Add a task to my CoSchedule calendar to check out {top_repo['REPO_NAME']} with due date August 3rd 2023.")
| [
"llama_index.llms.OpenAI"
] | [((280, 293), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (291, 293), False, 'from dotenv import load_dotenv\n'), ((311, 338), 'os.getenv', 'os.getenv', (['"""OPENAI_API_KEY"""'], {}), "('OPENAI_API_KEY')\n", (320, 338), False, 'import logging, sys, os\n'), ((368, 426), 'logging.basicConfig', 'logging.basicConfig', ([], {'stream': 'sys.stdout', 'level': 'logging.INFO'}), '(stream=sys.stdout, level=logging.INFO)\n', (387, 426), False, 'import logging, sys, os\n'), ((526, 561), 'streamlit.title', 'st.title', (['"""GitHub Archive Analysis"""'], {}), "('GitHub Archive Analysis')\n", (534, 561), True, 'import streamlit as st\n'), ((621, 662), 'streamlit.text_area', 'st.text_area', (['"""Enter your SQL query here"""'], {}), "('Enter your SQL query here')\n", (633, 662), True, 'import streamlit as st\n'), ((458, 498), 'logging.StreamHandler', 'logging.StreamHandler', ([], {'stream': 'sys.stdout'}), '(stream=sys.stdout)\n', (479, 498), False, 'import logging, sys, os\n'), ((729, 767), 'streamlit.experimental_connection', 'st.experimental_connection', (['"""snowpark"""'], {}), "('snowpark')\n", (755, 767), True, 'import streamlit as st\n'), ((878, 890), 'streamlit.write', 'st.write', (['df'], {}), '(df)\n', (886, 890), True, 'import streamlit as st\n'), ((954, 1001), 'streamlit.line_chart', 'st.line_chart', (['df'], {'x': '"""REPO_NAME"""', 'y': '"""SUM_STARS"""'}), "(df, x='REPO_NAME', y='SUM_STARS')\n", (967, 1001), True, 'import streamlit as st\n'), ((1228, 1262), 'llama_index.llms.OpenAI', 'OpenAI', ([], {'model': '"""gpt-3.5-turbo-0613"""'}), "(model='gpt-3.5-turbo-0613')\n", (1234, 1262), False, 'from llama_index.llms import OpenAI\n'), ((427, 446), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (444, 446), False, 'import logging, sys, os\n'), ((1167, 1194), 'os.getenv', 'os.getenv', (['"""ZAPIER_API_KEY"""'], {}), "('ZAPIER_API_KEY')\n", (1176, 1194), False, 'import logging, sys, os\n')] |
import os
from flask import Flask, render_template, request, redirect
from llama_index import download_loader
from llama_index import LLMPredictor, GPTSimpleVectorIndex, PromptHelper, ServiceContext
from langchain import OpenAI
os.environ["OPENAI_API_KEY"] = "insert_your_key_here"
current_script_path = os.path.dirname(os.path.abspath(__file__))
doc_path = os.path.join(current_script_path, 'data') + '/'
index_file = 'index.json'
app = Flask(__name__)
app.config['UPLOAD_FOLDER'] = doc_path
app.config['MAX_CONTENT_LENGTH'] = 10 * 1024 * 1024 # 10 MB max file size
def send_click(prompt):
if index is None:
return "Index not loaded. Please upload a file first."
response = index.query(prompt)
return response
index = None
@app.route('/', methods=['GET', 'POST'])
def index_page():
global index
if request.method == 'POST':
if 'file' not in request.files:
return redirect(request.url)
file = request.files['file']
if file.filename == '':
return redirect(request.url)
file.save(os.path.join(app.config['UPLOAD_FOLDER'], file.filename))
SimpleDirectoryReader = download_loader("SimpleDirectoryReader")
loader = SimpleDirectoryReader(doc_path, recursive=True, exclude_hidden=True)
documents = loader.load_data()
llm_predictor = LLMPredictor(llm=OpenAI(temperature=0, model_name="gpt-3.5-turbo"))
max_input_size = 4096
num_output = 256
max_chunk_overlap = 20
prompt_helper = PromptHelper(max_input_size, num_output, max_chunk_overlap)
service_context = ServiceContext.from_defaults(llm_predictor=llm_predictor, prompt_helper=prompt_helper)
index = GPTSimpleVectorIndex.from_documents(
documents, service_context=service_context
)
index.save_to_disk(index_file)
elif os.path.exists(index_file):
index = GPTSimpleVectorIndex.load_from_disk(index_file)
return render_template('index.html')
@app.route('/query', methods=['POST'])
def query():
if index is None:
return {'response': 'Index not loaded. Please upload a file first.'}
prompt = request.form['prompt']
response = send_click(prompt)
return {'response': str(response)}
if __name__ == '__main__':
# Run flask with the following defaults
app.run(debug=True, port=5000, host='0.0.0.0', )
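# --- Editor's note: example requests (assumed usage, not part of the original app) ---
# Upload a document, which (re)builds the index:
#   curl -F "file=@mydoc.pdf" http://localhost:5000/
# Query the uploaded document:
#   curl -X POST -d "prompt=What is this document about?" http://localhost:5000/query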
| [
"llama_index.download_loader",
"llama_index.ServiceContext.from_defaults",
"llama_index.GPTSimpleVectorIndex.from_documents",
"llama_index.PromptHelper",
"llama_index.GPTSimpleVectorIndex.load_from_disk"
] | [((442, 457), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (447, 457), False, 'from flask import Flask, render_template, request, redirect\n'), ((322, 347), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (337, 347), False, 'import os\n'), ((360, 401), 'os.path.join', 'os.path.join', (['current_script_path', '"""data"""'], {}), "(current_script_path, 'data')\n", (372, 401), False, 'import os\n'), ((1980, 2009), 'flask.render_template', 'render_template', (['"""index.html"""'], {}), "('index.html')\n", (1995, 2009), False, 'from flask import Flask, render_template, request, redirect\n'), ((1165, 1205), 'llama_index.download_loader', 'download_loader', (['"""SimpleDirectoryReader"""'], {}), "('SimpleDirectoryReader')\n", (1180, 1205), False, 'from llama_index import download_loader\n'), ((1534, 1593), 'llama_index.PromptHelper', 'PromptHelper', (['max_input_size', 'num_output', 'max_chunk_overlap'], {}), '(max_input_size, num_output, max_chunk_overlap)\n', (1546, 1593), False, 'from llama_index import LLMPredictor, GPTSimpleVectorIndex, PromptHelper, ServiceContext\n'), ((1620, 1711), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'llm_predictor': 'llm_predictor', 'prompt_helper': 'prompt_helper'}), '(llm_predictor=llm_predictor, prompt_helper=\n prompt_helper)\n', (1648, 1711), False, 'from llama_index import LLMPredictor, GPTSimpleVectorIndex, PromptHelper, ServiceContext\n'), ((1724, 1803), 'llama_index.GPTSimpleVectorIndex.from_documents', 'GPTSimpleVectorIndex.from_documents', (['documents'], {'service_context': 'service_context'}), '(documents, service_context=service_context)\n', (1759, 1803), False, 'from llama_index import LLMPredictor, GPTSimpleVectorIndex, PromptHelper, ServiceContext\n'), ((1876, 1902), 'os.path.exists', 'os.path.exists', (['index_file'], {}), '(index_file)\n', (1890, 1902), False, 'import os\n'), ((923, 944), 'flask.redirect', 'redirect', (['request.url'], {}), '(request.url)\n', (931, 944), False, 'from flask import Flask, render_template, request, redirect\n'), ((1033, 1054), 'flask.redirect', 'redirect', (['request.url'], {}), '(request.url)\n', (1041, 1054), False, 'from flask import Flask, render_template, request, redirect\n'), ((1074, 1130), 'os.path.join', 'os.path.join', (["app.config['UPLOAD_FOLDER']", 'file.filename'], {}), "(app.config['UPLOAD_FOLDER'], file.filename)\n", (1086, 1130), False, 'import os\n'), ((1920, 1967), 'llama_index.GPTSimpleVectorIndex.load_from_disk', 'GPTSimpleVectorIndex.load_from_disk', (['index_file'], {}), '(index_file)\n', (1955, 1967), False, 'from llama_index import LLMPredictor, GPTSimpleVectorIndex, PromptHelper, ServiceContext\n'), ((1373, 1422), 'langchain.OpenAI', 'OpenAI', ([], {'temperature': '(0)', 'model_name': '"""gpt-3.5-turbo"""'}), "(temperature=0, model_name='gpt-3.5-turbo')\n", (1379, 1422), False, 'from langchain import OpenAI\n')] |
from llama_index import ServiceContext
from app.context import create_base_context
from app.engine.constants import CHUNK_OVERLAP, CHUNK_SIZE
def create_service_context():
base = create_base_context()
return ServiceContext.from_defaults(
llm=base.llm,
embed_model=base.embed_model,
chunk_size=CHUNK_SIZE,
chunk_overlap=CHUNK_OVERLAP,
)
| [
"llama_index.ServiceContext.from_defaults"
] | [((186, 207), 'app.context.create_base_context', 'create_base_context', ([], {}), '()\n', (205, 207), False, 'from app.context import create_base_context\n'), ((219, 347), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'llm': 'base.llm', 'embed_model': 'base.embed_model', 'chunk_size': 'CHUNK_SIZE', 'chunk_overlap': 'CHUNK_OVERLAP'}), '(llm=base.llm, embed_model=base.embed_model,\n chunk_size=CHUNK_SIZE, chunk_overlap=CHUNK_OVERLAP)\n', (247, 347), False, 'from llama_index import ServiceContext\n')] |
from portkey_ai import Message, Portkey
from typing import Optional, Union, List, Any, Mapping, cast, Sequence
from portkey_ai.api_resources.utils import PortkeyResponse
from portkey_ai.llms.llama_index.utils import (
IMPORT_ERROR_MESSAGE,
is_chat_model,
modelname_to_contextsize,
)
try:
from llama_index.llms.custom import CustomLLM
from llama_index.bridge.pydantic import PrivateAttr
from llama_index.llms.base import (
ChatMessage,
ChatResponse,
ChatResponseGen,
CompletionResponse,
CompletionResponseGen,
LLMMetadata,
llm_chat_callback,
llm_completion_callback,
)
except ImportError as exc:
raise ImportError(IMPORT_ERROR_MESSAGE) from exc
class PortkeyLLM(CustomLLM):
"""_summary_.
Args:
LLM (_type_): _description_
"""
_client: Any = PrivateAttr()
model: str = ""
api_key: Optional[str] = None
base_url: Optional[str] = None
virtual_key: Optional[str] = None
config: Optional[Union[Mapping, str]] = None
provider: Optional[str] = None
trace_id: Optional[str] = None
custom_metadata: Optional[str] = None
def __init__(
self,
*,
api_key: Optional[str] = None,
base_url: Optional[str] = None,
virtual_key: Optional[str] = None,
config: Optional[Union[Mapping, str]] = None,
provider: Optional[str] = None,
trace_id: Optional[str] = None,
custom_metadata: Optional[str] = None,
**kwargs,
) -> None:
"""
Initialize a Portkey instance.
Args:
api_key (Optional[str]): The API key to authenticate with Portkey.
            base_url (Optional[str]): The base URL of a self-hosted Rubeus \
                (the open-source version of Portkey) or any other self-hosted server.
"""
super().__init__(
base_url=base_url,
api_key=api_key,
)
self._client = Portkey(
api_key=api_key,
base_url=base_url,
virtual_key=virtual_key,
config=config,
provider=provider,
trace_id=trace_id,
metadata=custom_metadata,
**kwargs,
)
self.model = ""
@property
def metadata(self) -> LLMMetadata:
"""LLM metadata."""
try:
from llama_index.llms.base import (
LLMMetadata,
)
except ImportError as exc:
raise ImportError(IMPORT_ERROR_MESSAGE) from exc
return LLMMetadata(
            context_window=modelname_to_contextsize(self.model) if self.model else 0,
is_chat_model=is_chat_model(self.model),
model_name=self.model,
)
@llm_completion_callback()
def complete(self, prompt: str, **kwargs: Any) -> CompletionResponse:
"""Completion endpoint for LLM."""
complete_fn = self._complete
return complete_fn(prompt, **kwargs)
@llm_chat_callback()
def chat(self, messages: Sequence[ChatMessage], **kwargs: Any) -> ChatResponse:
chat_fn = self._chat
return chat_fn(messages, **kwargs)
@llm_completion_callback()
def stream_complete(self, prompt: str, **kwargs: Any) -> CompletionResponseGen:
"""Completion endpoint for LLM."""
complete_fn = self._stream_complete
return complete_fn(prompt, **kwargs)
@llm_chat_callback()
def stream_chat(
self, messages: Sequence[ChatMessage], **kwargs: Any
) -> ChatResponseGen:
stream_chat_fn = self._stream_chat
return stream_chat_fn(messages, **kwargs)
def _chat(self, messages: Sequence[ChatMessage], **kwargs: Any) -> ChatResponse:
_messages = cast(
List[Message],
[{"role": i.role.value, "content": i.content} for i in messages],
)
response = self._client.chat.completions.create(messages=_messages, **kwargs)
self.model = self._get_model(response)
message = response.choices[0].message
return ChatResponse(message=message, raw=response)
def _complete(self, prompt: str, **kwargs: Any) -> CompletionResponse:
response = self._client.completions.create(prompt=prompt, **kwargs)
text = response.choices[0].text
return CompletionResponse(text=text, raw=response)
def _stream_chat(
self, messages: Sequence[ChatMessage], **kwargs: Any
) -> ChatResponseGen:
_messages = cast(
List[Message],
[{"role": i.role.value, "content": i.content} for i in messages],
)
response = self._client.chat.completions.create(
messages=_messages, stream=True, **kwargs
)
def gen() -> ChatResponseGen:
content = ""
function_call: Optional[dict] = {}
for resp in response:
if resp.choices is None:
continue
delta = resp.choices[0].delta
role = delta.get("role", "assistant")
content_delta = delta.get("content", "") or ""
content += content_delta
function_call_delta = delta.get("function_call", None)
if function_call_delta is not None:
if function_call is None:
function_call = function_call_delta
# ensure we do not add a blank function call
if (
function_call
and function_call.get("function_name", "") is None
):
del function_call["function_name"]
else:
function_call["arguments"] += function_call_delta["arguments"]
additional_kwargs = {}
if function_call is not None:
additional_kwargs["function_call"] = function_call
yield ChatResponse(
message=ChatMessage(
role=role,
content=content,
additional_kwargs=additional_kwargs,
),
delta=content_delta,
raw=resp,
)
return gen()
def _stream_complete(self, prompt: str, **kwargs: Any) -> CompletionResponseGen:
response = self._client.completions.create(prompt=prompt, stream=True, **kwargs)
def gen() -> CompletionResponseGen:
text = ""
for resp in response:
delta = resp.choices[0].text or ""
text += delta
yield CompletionResponse(
delta=delta,
text=text,
raw=resp,
)
return gen()
@property
def _is_chat_model(self) -> bool:
"""Check if a given model is a chat-based language model.
Returns:
bool: True if the provided model is a chat-based language model,
False otherwise.
"""
return is_chat_model(self.model or "")
def _get_model(self, response: PortkeyResponse) -> str:
return response.model
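# --- Editor's note: minimal usage sketch (assumed; the API key and virtual key are placeholders) ---
#
#   llm = PortkeyLLM(api_key="<PORTKEY_API_KEY>", virtual_key="<OPENAI_VIRTUAL_KEY>")
#   print(llm.complete("Paris is the capital of", max_tokens=16).text)
#   for chunk in llm.stream_complete("Name three graph databases:"):
#       print(chunk.delta, end="")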
| [
"llama_index.llms.base.llm_chat_callback",
"llama_index.llms.base.CompletionResponse",
"llama_index.llms.base.ChatResponse",
"llama_index.bridge.pydantic.PrivateAttr",
"llama_index.llms.base.llm_completion_callback",
"llama_index.llms.base.ChatMessage"
] | [((866, 879), 'llama_index.bridge.pydantic.PrivateAttr', 'PrivateAttr', ([], {}), '()\n', (877, 879), False, 'from llama_index.bridge.pydantic import PrivateAttr\n'), ((2779, 2804), 'llama_index.llms.base.llm_completion_callback', 'llm_completion_callback', ([], {}), '()\n', (2802, 2804), False, 'from llama_index.llms.base import ChatMessage, ChatResponse, ChatResponseGen, CompletionResponse, CompletionResponseGen, LLMMetadata, llm_chat_callback, llm_completion_callback\n'), ((3010, 3029), 'llama_index.llms.base.llm_chat_callback', 'llm_chat_callback', ([], {}), '()\n', (3027, 3029), False, 'from llama_index.llms.base import ChatMessage, ChatResponse, ChatResponseGen, CompletionResponse, CompletionResponseGen, LLMMetadata, llm_chat_callback, llm_completion_callback\n'), ((3192, 3217), 'llama_index.llms.base.llm_completion_callback', 'llm_completion_callback', ([], {}), '()\n', (3215, 3217), False, 'from llama_index.llms.base import ChatMessage, ChatResponse, ChatResponseGen, CompletionResponse, CompletionResponseGen, LLMMetadata, llm_chat_callback, llm_completion_callback\n'), ((3440, 3459), 'llama_index.llms.base.llm_chat_callback', 'llm_chat_callback', ([], {}), '()\n', (3457, 3459), False, 'from llama_index.llms.base import ChatMessage, ChatResponse, ChatResponseGen, CompletionResponse, CompletionResponseGen, LLMMetadata, llm_chat_callback, llm_completion_callback\n'), ((1989, 2155), 'portkey_ai.Portkey', 'Portkey', ([], {'api_key': 'api_key', 'base_url': 'base_url', 'virtual_key': 'virtual_key', 'config': 'config', 'provider': 'provider', 'trace_id': 'trace_id', 'metadata': 'custom_metadata'}), '(api_key=api_key, base_url=base_url, virtual_key=virtual_key, config\n =config, provider=provider, trace_id=trace_id, metadata=custom_metadata,\n **kwargs)\n', (1996, 2155), False, 'from portkey_ai import Message, Portkey\n'), ((3767, 3856), 'typing.cast', 'cast', (['List[Message]', "[{'role': i.role.value, 'content': i.content} for i in messages]"], {}), "(List[Message], [{'role': i.role.value, 'content': i.content} for i in\n messages])\n", (3771, 3856), False, 'from typing import Optional, Union, List, Any, Mapping, cast, Sequence\n'), ((4083, 4126), 'llama_index.llms.base.ChatResponse', 'ChatResponse', ([], {'message': 'message', 'raw': 'response'}), '(message=message, raw=response)\n', (4095, 4126), False, 'from llama_index.llms.base import ChatMessage, ChatResponse, ChatResponseGen, CompletionResponse, CompletionResponseGen, LLMMetadata, llm_chat_callback, llm_completion_callback\n'), ((4334, 4377), 'llama_index.llms.base.CompletionResponse', 'CompletionResponse', ([], {'text': 'text', 'raw': 'response'}), '(text=text, raw=response)\n', (4352, 4377), False, 'from llama_index.llms.base import ChatMessage, ChatResponse, ChatResponseGen, CompletionResponse, CompletionResponseGen, LLMMetadata, llm_chat_callback, llm_completion_callback\n'), ((4508, 4597), 'typing.cast', 'cast', (['List[Message]', "[{'role': i.role.value, 'content': i.content} for i in messages]"], {}), "(List[Message], [{'role': i.role.value, 'content': i.content} for i in\n messages])\n", (4512, 4597), False, 'from typing import Optional, Union, List, Any, Mapping, cast, Sequence\n'), ((7130, 7161), 'portkey_ai.llms.llama_index.utils.is_chat_model', 'is_chat_model', (["(self.model or '')"], {}), "(self.model or '')\n", (7143, 7161), False, 'from portkey_ai.llms.llama_index.utils import IMPORT_ERROR_MESSAGE, is_chat_model, modelname_to_contextsize\n'), ((2701, 2726), 'portkey_ai.llms.llama_index.utils.is_chat_model', 
'is_chat_model', (['self.model'], {}), '(self.model)\n', (2714, 2726), False, 'from portkey_ai.llms.llama_index.utils import IMPORT_ERROR_MESSAGE, is_chat_model, modelname_to_contextsize\n'), ((2616, 2652), 'portkey_ai.llms.llama_index.utils.modelname_to_contextsize', 'modelname_to_contextsize', (['self.model'], {}), '(self.model)\n', (2640, 2652), False, 'from portkey_ai.llms.llama_index.utils import IMPORT_ERROR_MESSAGE, is_chat_model, modelname_to_contextsize\n'), ((6706, 6758), 'llama_index.llms.base.CompletionResponse', 'CompletionResponse', ([], {'delta': 'delta', 'text': 'text', 'raw': 'resp'}), '(delta=delta, text=text, raw=resp)\n', (6724, 6758), False, 'from llama_index.llms.base import ChatMessage, ChatResponse, ChatResponseGen, CompletionResponse, CompletionResponseGen, LLMMetadata, llm_chat_callback, llm_completion_callback\n'), ((6043, 6119), 'llama_index.llms.base.ChatMessage', 'ChatMessage', ([], {'role': 'role', 'content': 'content', 'additional_kwargs': 'additional_kwargs'}), '(role=role, content=content, additional_kwargs=additional_kwargs)\n', (6054, 6119), False, 'from llama_index.llms.base import ChatMessage, ChatResponse, ChatResponseGen, CompletionResponse, CompletionResponseGen, LLMMetadata, llm_chat_callback, llm_completion_callback\n')] |
# Import the required modules from the llama_index library
from llama_index import GPTVectorStoreIndex, SimpleDirectoryReader, GPTListIndex
# Function to perform a search query on the documents
def search(query):
# Load the documents from the 'data' directory
documents = SimpleDirectoryReader('data').load_data()
# Create an index using the GPTVectorStoreIndex from the loaded documents
index = GPTVectorStoreIndex.from_documents(documents)
# Create a query engine using the index
query_engine = index.as_query_engine()
# Perform the search query on the query engine
response = query_engine.query(query)
# Return the response
return response
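# --- Editor's note: example invocation (assumed; requires OPENAI_API_KEY and documents in ./data) ---
if __name__ == "__main__":
    print(search("What are the key points of these documents?"))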
| [
"llama_index.SimpleDirectoryReader",
"llama_index.GPTVectorStoreIndex.from_documents"
] | [((427, 472), 'llama_index.GPTVectorStoreIndex.from_documents', 'GPTVectorStoreIndex.from_documents', (['documents'], {}), '(documents)\n', (461, 472), False, 'from llama_index import GPTVectorStoreIndex, SimpleDirectoryReader, GPTListIndex\n'), ((287, 316), 'llama_index.SimpleDirectoryReader', 'SimpleDirectoryReader', (['"""data"""'], {}), "('data')\n", (308, 316), False, 'from llama_index import GPTVectorStoreIndex, SimpleDirectoryReader, GPTListIndex\n')] |
import logging
from typing import Optional, Union
from llama_index import ServiceContext
from llama_index.callbacks import CallbackManager
from llama_index.embeddings.utils import EmbedType
from llama_index.llms.utils import LLMType
from llama_index.prompts import PromptTemplate
from llama_index.prompts.base import BasePromptTemplate
from llama_index.node_parser import (
SimpleNodeParser,
)
logger = logging.getLogger(__name__)
class LyzrService:
@staticmethod
def from_defaults(
llm: Optional[LLMType] = "default",
embed_model: Optional[EmbedType] = "default",
system_prompt: str = None,
query_wrapper_prompt: Union[str, BasePromptTemplate] = None,
**kwargs,
) -> ServiceContext:
if isinstance(query_wrapper_prompt, str):
query_wrapper_prompt = PromptTemplate(template=query_wrapper_prompt)
callback_manager: CallbackManager = kwargs.get(
"callback_manager", CallbackManager()
)
node_parser = SimpleNodeParser.from_defaults(
chunk_size=750,
chunk_overlap=100,
callback_manager=callback_manager,
)
service_context = ServiceContext.from_defaults(
llm=llm,
embed_model=embed_model,
system_prompt=system_prompt,
query_wrapper_prompt=query_wrapper_prompt,
callback_manager=callback_manager,
node_parser=node_parser,
**kwargs,
)
return service_context
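# --- Editor's note: usage sketch (assumed defaults; not part of the original module) ---
#
#   service_context = LyzrService.from_defaults(
#       system_prompt="Answer strictly from the supplied documents.",
#       query_wrapper_prompt="{query_str}",
#   )
#   # The returned ServiceContext can be passed to any llama_index index constructor.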
| [
"llama_index.ServiceContext.from_defaults",
"llama_index.callbacks.CallbackManager",
"llama_index.node_parser.SimpleNodeParser.from_defaults",
"llama_index.prompts.PromptTemplate"
] | [((409, 436), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (426, 436), False, 'import logging\n'), ((1016, 1120), 'llama_index.node_parser.SimpleNodeParser.from_defaults', 'SimpleNodeParser.from_defaults', ([], {'chunk_size': '(750)', 'chunk_overlap': '(100)', 'callback_manager': 'callback_manager'}), '(chunk_size=750, chunk_overlap=100,\n callback_manager=callback_manager)\n', (1046, 1120), False, 'from llama_index.node_parser import SimpleNodeParser\n'), ((1191, 1403), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'llm': 'llm', 'embed_model': 'embed_model', 'system_prompt': 'system_prompt', 'query_wrapper_prompt': 'query_wrapper_prompt', 'callback_manager': 'callback_manager', 'node_parser': 'node_parser'}), '(llm=llm, embed_model=embed_model,\n system_prompt=system_prompt, query_wrapper_prompt=query_wrapper_prompt,\n callback_manager=callback_manager, node_parser=node_parser, **kwargs)\n', (1219, 1403), False, 'from llama_index import ServiceContext\n'), ((830, 875), 'llama_index.prompts.PromptTemplate', 'PromptTemplate', ([], {'template': 'query_wrapper_prompt'}), '(template=query_wrapper_prompt)\n', (844, 875), False, 'from llama_index.prompts import PromptTemplate\n'), ((965, 982), 'llama_index.callbacks.CallbackManager', 'CallbackManager', ([], {}), '()\n', (980, 982), False, 'from llama_index.callbacks import CallbackManager\n')] |
from llama_index import SimpleDirectoryReader, LLMPredictor, ServiceContext, GPTVectorStoreIndex
from llama_index.response.pprint_utils import pprint_response
from langchain.chat_models import ChatOpenAI
from llama_index.tools import QueryEngineTool, ToolMetadata
from llama_index.query_engine import SubQuestionQueryEngine
from dotenv import load_dotenv
import gradio as gr
import os, sys
import logging
#loads dotenv lib to retrieve API keys from .env file
load_dotenv()
# enable INFO level logging
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))
#define LLM service
llm_predictor = LLMPredictor(llm=ChatOpenAI(temperature=0, model_name="gpt-3.5-turbo"))
service_context = ServiceContext.from_defaults(llm_predictor=llm_predictor)
#set the global service context object, avoiding passing service_context when building the index or when loading index from vector store
from llama_index import set_global_service_context
set_global_service_context(service_context)
def data_ingestion_indexing():
#load data
report_2021_docs = SimpleDirectoryReader(input_files=["reports/executive-summary-2021.pdf"]).load_data()
print(f"loaded executive summary 2021 with {len(report_2021_docs)} pages")
report_2022_docs = SimpleDirectoryReader(input_files=["reports/executive-summary-2022.pdf"]).load_data()
print(f"loaded executive summary 2022 with {len(report_2022_docs)} pages")
#build indices
report_2021_index = GPTVectorStoreIndex.from_documents(report_2021_docs)
print(f"built index for executive summary 2021 with {len(report_2021_index.docstore.docs)} nodes")
report_2022_index = GPTVectorStoreIndex.from_documents(report_2022_docs)
print(f"built index for executive summary 2022 with {len(report_2022_index.docstore.docs)} nodes")
#build query engines
report_2021_engine = report_2021_index.as_query_engine(similarity_top_k=3)
report_2022_engine = report_2022_index.as_query_engine(similarity_top_k=3)
#build query engine tools
query_engine_tools = [
QueryEngineTool(
query_engine = report_2021_engine,
metadata = ToolMetadata(name='executive_summary_2021', description='Provides information on US government financial report executive summary 2021')
),
QueryEngineTool(
query_engine = report_2022_engine,
metadata = ToolMetadata(name='executive_summary_2022', description='Provides information on US government financial report executive summary 2022')
)
]
#define SubQuestionQueryEngine
sub_question_engine = SubQuestionQueryEngine.from_defaults(query_engine_tools=query_engine_tools)
return sub_question_engine
def data_querying(input_text):
#queries the engine with the input text
response = sub_question_engine.query(input_text)
return response.response
iface = gr.Interface(fn=data_querying,
inputs=gr.components.Textbox(lines=3, label="Enter your question"),
outputs="text",
title="Analyzing the U.S. Government's Financial Reports for 2021 and 2022")
#data ingestion and indexing
sub_question_engine = data_ingestion_indexing()
iface.launch(share=False)
#run queries
#response = sub_question_engine.query('Compare and contrast the DoD costs between 2021 and 2022')
#print(response)
#response = sub_question_engine.query('Compare revenue growth from 2021 to 2022')
#print(response)
| [
"llama_index.SimpleDirectoryReader",
"llama_index.ServiceContext.from_defaults",
"llama_index.tools.ToolMetadata",
"llama_index.set_global_service_context",
"llama_index.query_engine.SubQuestionQueryEngine.from_defaults",
"llama_index.GPTVectorStoreIndex.from_documents"
] | [((460, 473), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (471, 473), False, 'from dotenv import load_dotenv\n'), ((503, 561), 'logging.basicConfig', 'logging.basicConfig', ([], {'stream': 'sys.stdout', 'level': 'logging.INFO'}), '(stream=sys.stdout, level=logging.INFO)\n', (522, 561), False, 'import logging\n'), ((762, 819), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'llm_predictor': 'llm_predictor'}), '(llm_predictor=llm_predictor)\n', (790, 819), False, 'from llama_index import SimpleDirectoryReader, LLMPredictor, ServiceContext, GPTVectorStoreIndex\n'), ((1009, 1052), 'llama_index.set_global_service_context', 'set_global_service_context', (['service_context'], {}), '(service_context)\n', (1035, 1052), False, 'from llama_index import set_global_service_context\n'), ((593, 633), 'logging.StreamHandler', 'logging.StreamHandler', ([], {'stream': 'sys.stdout'}), '(stream=sys.stdout)\n', (614, 633), False, 'import logging\n'), ((1521, 1573), 'llama_index.GPTVectorStoreIndex.from_documents', 'GPTVectorStoreIndex.from_documents', (['report_2021_docs'], {}), '(report_2021_docs)\n', (1555, 1573), False, 'from llama_index import SimpleDirectoryReader, LLMPredictor, ServiceContext, GPTVectorStoreIndex\n'), ((1702, 1754), 'llama_index.GPTVectorStoreIndex.from_documents', 'GPTVectorStoreIndex.from_documents', (['report_2022_docs'], {}), '(report_2022_docs)\n', (1736, 1754), False, 'from llama_index import SimpleDirectoryReader, LLMPredictor, ServiceContext, GPTVectorStoreIndex\n'), ((2653, 2728), 'llama_index.query_engine.SubQuestionQueryEngine.from_defaults', 'SubQuestionQueryEngine.from_defaults', ([], {'query_engine_tools': 'query_engine_tools'}), '(query_engine_tools=query_engine_tools)\n', (2689, 2728), False, 'from llama_index.query_engine import SubQuestionQueryEngine\n'), ((562, 581), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (579, 581), False, 'import logging\n'), ((689, 742), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'temperature': '(0)', 'model_name': '"""gpt-3.5-turbo"""'}), "(temperature=0, model_name='gpt-3.5-turbo')\n", (699, 742), False, 'from langchain.chat_models import ChatOpenAI\n'), ((3001, 3060), 'gradio.components.Textbox', 'gr.components.Textbox', ([], {'lines': '(3)', 'label': '"""Enter your question"""'}), "(lines=3, label='Enter your question')\n", (3022, 3060), True, 'import gradio as gr\n'), ((1123, 1196), 'llama_index.SimpleDirectoryReader', 'SimpleDirectoryReader', ([], {'input_files': "['reports/executive-summary-2021.pdf']"}), "(input_files=['reports/executive-summary-2021.pdf'])\n", (1144, 1196), False, 'from llama_index import SimpleDirectoryReader, LLMPredictor, ServiceContext, GPTVectorStoreIndex\n'), ((1312, 1385), 'llama_index.SimpleDirectoryReader', 'SimpleDirectoryReader', ([], {'input_files': "['reports/executive-summary-2022.pdf']"}), "(input_files=['reports/executive-summary-2022.pdf'])\n", (1333, 1385), False, 'from llama_index import SimpleDirectoryReader, LLMPredictor, ServiceContext, GPTVectorStoreIndex\n'), ((2195, 2341), 'llama_index.tools.ToolMetadata', 'ToolMetadata', ([], {'name': '"""executive_summary_2021"""', 'description': '"""Provides information on US government financial report executive summary 2021"""'}), "(name='executive_summary_2021', description=\n 'Provides information on US government financial report executive summary 2021'\n )\n", (2207, 2341), False, 'from llama_index.tools import QueryEngineTool, ToolMetadata\n'), ((2438, 2584), 
'llama_index.tools.ToolMetadata', 'ToolMetadata', ([], {'name': '"""executive_summary_2022"""', 'description': '"""Provides information on US government financial report executive summary 2022"""'}), "(name='executive_summary_2022', description=\n 'Provides information on US government financial report executive summary 2022'\n )\n", (2450, 2584), False, 'from llama_index.tools import QueryEngineTool, ToolMetadata\n')] |
from llama_index.schema import Document
from llmsherpa.readers import LayoutPDFReader
import pandas as pd
llmsherpa_api_url = "https://readers.llmsherpa.com/api/document/developer/parseDocument?renderFormat=all"
def parse_source_item(item):
documents = []
if item['type'] == 'csv':
data = pd.read_csv(item['path'])
questions = data[data.columns[0]].tolist()
answers = data[data.columns[1]].tolist()
for index, question in enumerate(questions):
text = question + " " + answers[index]
documents.append(Document(text=text, id_=f"{item['_id']}_{index}"))
if item['type'] == 'pdf':
pdf_reader = LayoutPDFReader(llmsherpa_api_url)
doc = pdf_reader.read_pdf(item['path'])
for index, chunk in enumerate(doc.chunks()):
documents.append(Document(text=chunk.to_context_text(), id_=f"{item['_id']}_{index}"))
if item['type'] == 'qa':
text = item['question'] + " " + item['answer']
documents.append(Document(text=text, id_=f"{item['_id']}"))
return documents
def parse_items_to_delete(item):
documents_to_delete = []
if item['type'] == 'csv':
data = pd.read_csv(item['path'])
questions = data[data.columns[0]].tolist()
        for index, _ in enumerate(questions):  # enumerate yields (index, value); only the index is needed
documents_to_delete.append(f"{item['_id']}_{index}")
if item['type'] == 'pdf':
pdf_reader = LayoutPDFReader(llmsherpa_api_url)
doc = pdf_reader.read_pdf(item['path'])
        for index, _ in enumerate(doc.chunks()):  # enumerate yields (index, chunk); only the index is needed
documents_to_delete.append(f"{item['_id']}_{index}")
if item['type'] == 'qa':
documents_to_delete.append(f"{item['_id']}")
return documents_to_delete
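# --- Editor's note: example item (the dict shape is inferred from the accessors above) ---
if __name__ == "__main__":
    qa_item = {
        "_id": "faq-001",
        "type": "qa",
        "question": "What file types does the parser support?",
        "answer": "CSV, PDF and plain question/answer pairs.",
    }
    print(parse_source_item(qa_item))       # -> [Document(id_='faq-001', ...)]
    print(parse_items_to_delete(qa_item))   # -> ['faq-001']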
| [
"llama_index.schema.Document"
] | [((310, 335), 'pandas.read_csv', 'pd.read_csv', (["item['path']"], {}), "(item['path'])\n", (321, 335), True, 'import pandas as pd\n'), ((675, 709), 'llmsherpa.readers.LayoutPDFReader', 'LayoutPDFReader', (['llmsherpa_api_url'], {}), '(llmsherpa_api_url)\n', (690, 709), False, 'from llmsherpa.readers import LayoutPDFReader\n'), ((1198, 1223), 'pandas.read_csv', 'pd.read_csv', (["item['path']"], {}), "(item['path'])\n", (1209, 1223), True, 'import pandas as pd\n'), ((1437, 1471), 'llmsherpa.readers.LayoutPDFReader', 'LayoutPDFReader', (['llmsherpa_api_url'], {}), '(llmsherpa_api_url)\n', (1452, 1471), False, 'from llmsherpa.readers import LayoutPDFReader\n'), ((1023, 1064), 'llama_index.schema.Document', 'Document', ([], {'text': 'text', 'id_': 'f"""{item[\'_id\']}"""'}), '(text=text, id_=f"{item[\'_id\']}")\n', (1031, 1064), False, 'from llama_index.schema import Document\n'), ((572, 621), 'llama_index.schema.Document', 'Document', ([], {'text': 'text', 'id_': 'f"""{item[\'_id\']}_{index}"""'}), '(text=text, id_=f"{item[\'_id\']}_{index}")\n', (580, 621), False, 'from llama_index.schema import Document\n')] |
"""Adapted from https://github.com/jerryjliu/llama_index/blob/main/docs/examples/finetuning/embeddings/eval_utils.py"""
from llama_index.schema import TextNode
from llama_index import ServiceContext, VectorStoreIndex
import pandas as pd
from tqdm import tqdm
def evaluate(
dataset,
embed_model,
top_k=10,
):
corpus = dataset.corpus
queries = dataset.queries
relevant_docs = dataset.relevant_docs
service_context = ServiceContext.from_defaults(embed_model=embed_model)
nodes = [TextNode(id_=id_, text=text) for id_, text in corpus.items()]
index = VectorStoreIndex(nodes, service_context=service_context, show_progress=True)
retriever = index.as_retriever(similarity_top_k=top_k)
eval_results = []
ct = 0
for query_id, query in tqdm(queries.items()):
        # We only evaluate the first 2000 queries because llama-index retrieval was far too slow:
        # all 200k test queries were projected to take ~400 hours on the A100, so we cap the run at
        # 2k queries, which should take about 4 hours.
if ct >= 2000:
break
retrieved_nodes = retriever.retrieve(query)
retrieved_ids = [node.node.node_id for node in retrieved_nodes]
expected_id = relevant_docs[query_id][0]
rank = None
for idx, id in enumerate(retrieved_ids):
if id == expected_id:
rank = idx + 1
break
is_hit = rank is not None # assume 1 relevant doc
mrr = 0 if rank is None else 1 / rank
eval_result = {
"is_hit": is_hit,
"mrr": mrr,
"retrieved": retrieved_ids,
"expected": expected_id,
"query": query_id,
}
eval_results.append(eval_result)
ct += 1
return eval_results
def display_results(names, results_arr):
"""Display results from evaluate."""
hit_rates = []
mrrs = []
for name, results in zip(names, results_arr):
results_df = pd.DataFrame(results)
hit_rate = results_df["is_hit"].mean()
mrr = results_df["mrr"].mean()
hit_rates.append(hit_rate)
mrrs.append(mrr)
final_df = pd.DataFrame({"retrievers": names, "hit_rate": hit_rates, "mrr": mrrs})
print(final_df)
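# --- Editor's note: usage sketch (assumed; `dataset` must expose .corpus, .queries and .relevant_docs,
# and `embed_model` can be anything accepted by ServiceContext.from_defaults) ---
#
#   results = evaluate(dataset, embed_model, top_k=10)
#   display_results(["my-embedding-model"], [results])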
| [
"llama_index.ServiceContext.from_defaults",
"llama_index.schema.TextNode",
"llama_index.VectorStoreIndex"
] | [((446, 499), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'embed_model': 'embed_model'}), '(embed_model=embed_model)\n', (474, 499), False, 'from llama_index import ServiceContext, VectorStoreIndex\n'), ((587, 663), 'llama_index.VectorStoreIndex', 'VectorStoreIndex', (['nodes'], {'service_context': 'service_context', 'show_progress': '(True)'}), '(nodes, service_context=service_context, show_progress=True)\n', (603, 663), False, 'from llama_index import ServiceContext, VectorStoreIndex\n'), ((2186, 2257), 'pandas.DataFrame', 'pd.DataFrame', (["{'retrievers': names, 'hit_rate': hit_rates, 'mrr': mrrs}"], {}), "({'retrievers': names, 'hit_rate': hit_rates, 'mrr': mrrs})\n", (2198, 2257), True, 'import pandas as pd\n'), ((513, 541), 'llama_index.schema.TextNode', 'TextNode', ([], {'id_': 'id_', 'text': 'text'}), '(id_=id_, text=text)\n', (521, 541), False, 'from llama_index.schema import TextNode\n'), ((2002, 2023), 'pandas.DataFrame', 'pd.DataFrame', (['results'], {}), '(results)\n', (2014, 2023), True, 'import pandas as pd\n')] |
from llama_index.core.llms import ChatMessage, MessageRole
from llama_index.core.prompts import ChatPromptTemplate
chat_text_qa_msgs = [
ChatMessage(
role=MessageRole.SYSTEM,
content=("""You are an expert Q&A system that is trusted around the world. You are answering texts on Hindu scriptures. Always make sure to follow these rules:
1. Be cautious to avoid offending followers of Hindu dharma.
2. Rely on provided context information rather than prior knowledge.
3. Avoid phrases like 'Based on the context...' and answer directly.
4. Quote sources, chapters, kandas, sargas, and shlokas (along with their explanation) from the context to explain the relevance of the answer.
5. Format answers using markdowns, emojis. For example, you can format sanskrit shlokas (and their meaning) using blockquote.
6. Add a disclaimer saying that the answers may be wrong as there are multiple interpretations of these scriptures and some context might be missing. Ask users to do their research before accepting answers. This message should be clear and loud for out of context answers."""
),
),
ChatMessage(
role=MessageRole.USER,
content=(
"""Context information is below.
---------------------
{context_str}
---------------------
Given the context information and not prior knowledge, answer the query. The format should be:
"<Brief answer>
<Explanations with sources quoted>
<Final Summary/ Conclusion>"
Query: {query_str}
Answer: """
),
),
]
custom_text_qa_template = ChatPromptTemplate(chat_text_qa_msgs)
training_chat_text_qa_msgs = [
ChatMessage(
role=MessageRole.SYSTEM,
content=("""You are answering texts on Hindu scriptures. Make sure to follow these rules:
1. Be respectful to Hindu dharma followers; avoid potential offense.
2. Rely solely on provided context, not prior knowledge.
3. Directly answer; omit phrases like 'Based on the context...'.
4. Quote sources, chapters, kandas, sargas, and shlokas, explaining their relevance.
5. Format using markdowns and emojis, e.g., blockquote for shlokas and meanings.
6. Add a disclaimer: Answers may be subjective; research independently.
The format should be:
"<Brief answer>
<Explanations with sources quoted>
<Final Summary/ Conclusion>
<Optional Disclaimer>"
"""
),
),
ChatMessage(
role=MessageRole.USER,
content=(
"""Context information is below.
---------------------
{context_str}
---------------------
Query: {query_str}
Answer: """
),
),
]
training_text_qa_template = ChatPromptTemplate(training_chat_text_qa_msgs)
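# --- Editor's note: usage sketch (assumed; `index` is any llama_index index built elsewhere) ---
#
#   query_engine = index.as_query_engine(text_qa_template=custom_text_qa_template)
#   print(query_engine.query("What does the Ramayana say about dharma?"))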
| [
"llama_index.core.prompts.ChatPromptTemplate",
"llama_index.core.llms.ChatMessage"
] | [((1612, 1649), 'llama_index.core.prompts.ChatPromptTemplate', 'ChatPromptTemplate', (['chat_text_qa_msgs'], {}), '(chat_text_qa_msgs)\n', (1630, 1649), False, 'from llama_index.core.prompts import ChatPromptTemplate\n'), ((2678, 2724), 'llama_index.core.prompts.ChatPromptTemplate', 'ChatPromptTemplate', (['training_chat_text_qa_msgs'], {}), '(training_chat_text_qa_msgs)\n', (2696, 2724), False, 'from llama_index.core.prompts import ChatPromptTemplate\n'), ((142, 1141), 'llama_index.core.llms.ChatMessage', 'ChatMessage', ([], {'role': 'MessageRole.SYSTEM', 'content': '"""You are an expert Q&A system that is trusted around the world. You are answering texts on Hindu scriptures. Always make sure to follow these rules:\n 1. Be cautious to avoid offending followers of Hindu dharma.\n 2. Rely on provided context information rather than prior knowledge.\n 3. Avoid phrases like \'Based on the context...\' and answer directly.\n 4. Quote sources, chapters, kandas, sargas, and shlokas (along with their explanation) from the context to explain the relevance of the answer.\n 5. Format answers using markdowns, emojis. For example, you can format sanskrit shlokas (and their meaning) using blockquote.\n 6. Add a disclaimer saying that the answers may be wrong as there are multiple interpretations of these scriptures and some context might be missing. Ask users to do their research before accepting answers. This message should be clear and loud for out of context answers."""'}), '(role=MessageRole.SYSTEM, content=\n """You are an expert Q&A system that is trusted around the world. You are answering texts on Hindu scriptures. Always make sure to follow these rules:\n 1. Be cautious to avoid offending followers of Hindu dharma.\n 2. Rely on provided context information rather than prior knowledge.\n 3. Avoid phrases like \'Based on the context...\' and answer directly.\n 4. Quote sources, chapters, kandas, sargas, and shlokas (along with their explanation) from the context to explain the relevance of the answer.\n 5. Format answers using markdowns, emojis. For example, you can format sanskrit shlokas (and their meaning) using blockquote.\n 6. Add a disclaimer saying that the answers may be wrong as there are multiple interpretations of these scriptures and some context might be missing. Ask users to do their research before accepting answers. This message should be clear and loud for out of context answers."""\n )\n', (153, 1141), False, 'from llama_index.core.llms import ChatMessage, MessageRole\n'), ((1180, 1545), 'llama_index.core.llms.ChatMessage', 'ChatMessage', ([], {'role': 'MessageRole.USER', 'content': '"""Context information is below.\n---------------------\n{context_str}\n---------------------\nGiven the context information and not prior knowledge, answer the query. The format should be: \n"<Brief answer>\n<Explanations with sources quoted>\n<Final Summary/ Conclusion>"\n\nQuery: {query_str}\n \nAnswer: """'}), '(role=MessageRole.USER, content=\n """Context information is below.\n---------------------\n{context_str}\n---------------------\nGiven the context information and not prior knowledge, answer the query. 
The format should be: \n"<Brief answer>\n<Explanations with sources quoted>\n<Final Summary/ Conclusion>"\n\nQuery: {query_str}\n \nAnswer: """\n )\n', (1191, 1545), False, 'from llama_index.core.llms import ChatMessage, MessageRole\n'), ((1686, 2382), 'llama_index.core.llms.ChatMessage', 'ChatMessage', ([], {'role': 'MessageRole.SYSTEM', 'content': '"""You are answering texts on Hindu scriptures. Make sure to follow these rules:\n1. Be respectful to Hindu dharma followers; avoid potential offense.\n2. Rely solely on provided context, not prior knowledge.\n3. Directly answer; omit phrases like \'Based on the context...\'.\n4. Quote sources, chapters, kandas, sargas, and shlokas, explaining their relevance.\n5. Format using markdowns and emojis, e.g., blockquote for shlokas and meanings.\n6. Add a disclaimer: Answers may be subjective; research independently.\n\nThe format should be: \n"<Brief answer>\n<Explanations with sources quoted>\n<Final Summary/ Conclusion>\n<Optional Disclaimer>\\"\n"""'}), '(role=MessageRole.SYSTEM, content=\n """You are answering texts on Hindu scriptures. Make sure to follow these rules:\n1. Be respectful to Hindu dharma followers; avoid potential offense.\n2. Rely solely on provided context, not prior knowledge.\n3. Directly answer; omit phrases like \'Based on the context...\'.\n4. Quote sources, chapters, kandas, sargas, and shlokas, explaining their relevance.\n5. Format using markdowns and emojis, e.g., blockquote for shlokas and meanings.\n6. Add a disclaimer: Answers may be subjective; research independently.\n\nThe format should be: \n"<Brief answer>\n<Explanations with sources quoted>\n<Final Summary/ Conclusion>\n<Optional Disclaimer>\\"\n"""\n )\n', (1697, 2382), False, 'from llama_index.core.llms import ChatMessage, MessageRole\n'), ((2420, 2609), 'llama_index.core.llms.ChatMessage', 'ChatMessage', ([], {'role': 'MessageRole.USER', 'content': '"""Context information is below.\n---------------------\n{context_str}\n---------------------\n\nQuery: {query_str}\n \nAnswer: """'}), '(role=MessageRole.USER, content=\n """Context information is below.\n---------------------\n{context_str}\n---------------------\n\nQuery: {query_str}\n \nAnswer: """\n )\n', (2431, 2609), False, 'from llama_index.core.llms import ChatMessage, MessageRole\n')] |
import sys
sys.stdout.reconfigure(encoding="utf-8")
sys.stdin.reconfigure(encoding="utf-8")
import streamlit as st
import streamlit.components.v1 as components
import re
import random
CODE_BUILD_KG = """
# Prepare the GraphStore
os.environ['NEBULA_USER'] = "root"
os.environ['NEBULA_PASSWORD'] = "nebula" # default password
os.environ['NEBULA_ADDRESS'] = "127.0.0.1:9669" # assumed we have NebulaGraph installed locally
space_name = "guardians"
edge_types, rel_prop_names = ["relationship"], ["relationship"] # default, could be omit if create from an empty kg
tags = ["entity"] # default, could be omit if create from an empty kg
graph_store = NebulaGraphStore(space_name=space_name, edge_types=edge_types, rel_prop_names=rel_prop_names, tags=tags)
storage_context = StorageContext.from_defaults(graph_store=graph_store)
# Download and preprocess the data from Wikipedia
from llama_index import download_loader
WikipediaReader = download_loader("WikipediaReader")
loader = WikipediaReader()
documents = loader.load_data(pages=['Guardians of the Galaxy Vol. 3'], auto_suggest=False)
# Use the LLM to extract knowledge triplets from the documents and store them in the GraphStore (NebulaGraph)
kg_index = KnowledgeGraphIndex.from_documents(
documents,
storage_context=storage_context,
max_triplets_per_chunk=10,
service_context=service_context,
space_name=space_name,
edge_types=edge_types,
rel_prop_names=rel_prop_names,
tags=tags,
include_embeddings=True,
)
"""
CODE_NL2CYPHER_LANGCHAIN = """
## Langchain
# Doc: https://python.langchain.com/docs/modules/chains/additional/graph_nebula_qa
from langchain.chat_models import ChatOpenAI
from langchain.chains import NebulaGraphQAChain
from langchain.graphs import NebulaGraph
graph = NebulaGraph(
space=space_name,
username="root",
password="nebula",
address="127.0.0.1",
port=9669,
session_pool_size=30,
)
chain = NebulaGraphQAChain.from_llm(
llm, graph=graph, verbose=True
)
chain.run(
"Tell me about Peter Quill?",
)
"""
CODE_NL2CYPHER_LLAMAINDEX = """
## Llama Index
# Doc: https://gpt-index.readthedocs.io/en/latest/examples/query_engine/knowledge_graph_query_engine.html
from llama_index.query_engine import KnowledgeGraphQueryEngine
from llama_index.storage.storage_context import StorageContext
from llama_index.graph_stores import NebulaGraphStore
nl2kg_query_engine = KnowledgeGraphQueryEngine(
storage_context=storage_context,
service_context=service_context,
llm=llm,
verbose=True,
)
response = nl2kg_query_engine.query(
"Tell me about Peter Quill?",
)
"""
import os
import json
import openai
from llama_index.llms import AzureOpenAI
from langchain.embeddings import OpenAIEmbeddings
from llama_index import LangchainEmbedding
from llama_index import (
VectorStoreIndex,
SimpleDirectoryReader,
KnowledgeGraphIndex,
LLMPredictor,
ServiceContext,
)
from llama_index.storage.storage_context import StorageContext
from llama_index.graph_stores import NebulaGraphStore
import logging
import sys
logging.basicConfig(
stream=sys.stdout, level=logging.INFO
) # logging.DEBUG for more verbose output
# logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))
openai.api_type = "azure"
openai.api_base = st.secrets["OPENAI_API_BASE"]
# openai.api_version = "2022-12-01" azure gpt-3
openai.api_version = "2023-05-15" # azure gpt-3.5 turbo
openai.api_key = st.secrets["OPENAI_API_KEY"]
llm = AzureOpenAI(
engine=st.secrets["DEPLOYMENT_NAME"],
temperature=0,
model="gpt-35-turbo",
)
llm_predictor = LLMPredictor(llm=llm)
# You need to deploy your own embedding model as well as your own chat completion model
embedding_llm = LangchainEmbedding(
OpenAIEmbeddings(
model="text-embedding-ada-002",
deployment=st.secrets["EMBEDDING_DEPLOYMENT_NAME"],
openai_api_key=openai.api_key,
openai_api_base=openai.api_base,
openai_api_type=openai.api_type,
openai_api_version=openai.api_version,
),
embed_batch_size=1,
)
service_context = ServiceContext.from_defaults(
llm_predictor=llm_predictor,
embed_model=embedding_llm,
)
os.environ["NEBULA_USER"] = st.secrets["graphd_user"]
os.environ["NEBULA_PASSWORD"] = st.secrets["graphd_password"]
os.environ[
"NEBULA_ADDRESS"
] = f"{st.secrets['graphd_host']}:{st.secrets['graphd_port']}"
space_name = "guardians"
edge_types, rel_prop_names = ["relationship"], [
"relationship"
]  # default; can be omitted if creating from an empty kg
tags = ["entity"]  # default; can be omitted if creating from an empty kg
graph_store = NebulaGraphStore(
space_name=space_name,
edge_types=edge_types,
rel_prop_names=rel_prop_names,
tags=tags,
)
storage_context = StorageContext.from_defaults(graph_store=graph_store)
from llama_index.query_engine import KnowledgeGraphQueryEngine
from llama_index.storage.storage_context import StorageContext
from llama_index.graph_stores import NebulaGraphStore
nl2kg_query_engine = KnowledgeGraphQueryEngine(
storage_context=storage_context,
service_context=service_context,
llm=llm,
verbose=True,
)
def cypher_to_all_paths(query):
# Find the MATCH and RETURN parts
match_parts = re.findall(r"(MATCH .+?(?=MATCH|$))", query, re.I | re.S)
return_part = re.search(r"RETURN .+", query).group()
modified_matches = []
path_ids = []
# Go through each MATCH part
for i, part in enumerate(match_parts):
path_id = f"path_{i}"
path_ids.append(path_id)
# Replace the MATCH keyword with "MATCH path_i = "
modified_part = part.replace("MATCH ", f"MATCH {path_id} = ")
modified_matches.append(modified_part)
# Join the modified MATCH parts
matches_string = " ".join(modified_matches)
# Construct the new RETURN part
return_string = f"RETURN {', '.join(path_ids)};"
# Remove the old RETURN part from matches_string
matches_string = matches_string.replace(return_part, "")
# Combine everything
modified_query = f"{matches_string}\n{return_string}"
return modified_query
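# Illustrative example (added comment, not part of the original app):
#   cypher_to_all_paths("MATCH (p)-[e:relationship]->(m) RETURN p, e, m")
#   ->  "MATCH path_0 = (p)-[e:relationship]->(m) \nRETURN path_0;"
# i.e. each MATCH clause is bound to a named path and only the paths are
# returned, so the result can be rendered as a graph by pyvis further below.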
# convert a NebulaGraph result set into a pandas DataFrame
def result_to_df(result):
from typing import Dict
import pandas as pd
columns = result.keys()
d: Dict[str, list] = {}
for col_num in range(result.col_size()):
col_name = columns[col_num]
col_list = result.column_values(col_name)
d[col_name] = [x.cast() for x in col_list]
return pd.DataFrame(d)
def render_pd_item(g, item):
from nebula3.data.DataObject import Node, PathWrapper, Relationship
if isinstance(item, Node):
node_id = item.get_id().cast()
tags = item.tags() # list of strings
props = dict()
for tag in tags:
props.update(item.properties(tag))
g.add_node(node_id, label=node_id, title=str(props))
elif isinstance(item, Relationship):
src_id = item.start_vertex_id().cast()
dst_id = item.end_vertex_id().cast()
edge_name = item.edge_name()
props = item.properties()
# ensure start and end vertex exist in graph
        if src_id not in g.node_ids:
            g.add_node(src_id)
        if dst_id not in g.node_ids:
            g.add_node(dst_id)
g.add_edge(src_id, dst_id, label=edge_name, title=str(props))
elif isinstance(item, PathWrapper):
for node in item.nodes():
render_pd_item(g, node)
for edge in item.relationships():
render_pd_item(g, edge)
elif isinstance(item, list):
for it in item:
render_pd_item(g, it)
def create_pyvis_graph(result_df):
from pyvis.network import Network
g = Network(
notebook=True,
directed=True,
cdn_resources="in_line",
height="500px",
width="100%",
)
for _, row in result_df.iterrows():
for item in row:
render_pd_item(g, item)
g.repulsion(
node_distance=100,
central_gravity=0.2,
spring_length=200,
spring_strength=0.05,
damping=0.09,
)
return g
def query_nebulagraph(
query,
space_name=space_name,
address=st.secrets["graphd_host"],
port=9669,
user=st.secrets["graphd_user"],
password=st.secrets["graphd_password"],
):
from nebula3.Config import SessionPoolConfig
from nebula3.gclient.net.SessionPool import SessionPool
config = SessionPoolConfig()
session_pool = SessionPool(user, password, space_name, [(address, port)])
session_pool.init(config)
return session_pool.execute(query)
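# Illustrative flow (added comment): the Streamlit tabs below chain these helpers
# together roughly as follows:
#   result = query_nebulagraph("MATCH ()-[e]->() RETURN e LIMIT 25")
#   result_df = result_to_df(result)       # nebula3 ResultSet -> pandas DataFrame
#   g = create_pyvis_graph(result_df)      # DataFrame -> pyvis Network
#   components.html(g.generate_html("graph.html"), height=500)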
st.title("利用 LLM 构建、查询知识图谱")
(
tab_code_kg,
tab_notebook,
tab_graph_view,
tab_cypher,
tab_nl2cypher,
tab_code_nl2cypher,
) = st.tabs(
[
"代码:构建知识图谱",
"完整 Notebook",
"图谱可视化",
"Cypher 查询",
"自然语言查询",
"代码:NL2Cypher",
]
)
with tab_code_kg:
st.write("> 利用 LLM,几行代码构建知识图谱")
st.code(body=CODE_BUILD_KG, language="python")
with tab_notebook:
st.write("> 完整 Demo 过程 Notebook")
st.write(
"""
这个 Notebook 展示了如何利用 LLM 从不同类型的信息源(以维基百科为例)中抽取知识三元组,并存储到图数据库 NebulaGraph 中。
本 Demo 中,我们先抽取了维基百科中关于《银河护卫队3》的信息,然后利用 LLM 生成的知识三元组,构建了一个图谱。
然后利用 Cypher 查询图谱,最后利用 LlamaIndex 和 Langchain 中的 NL2NebulaCypher,实现了自然语言查询图谱的功能。
您可以点击其他标签亲自试玩图谱的可视化、Cypher 查询、自然语言查询(NL2NebulaCypher)等功能。
"""
)
# link to download notebook
st.markdown(
"""
这里可以[下载](https://www.siwei.io/demo-dumps/kg-llm/KG_Building.ipynb) 完整的 Notebook。
"""
)
components.iframe(
src="https://www.siwei.io/demo-dumps/kg-llm/KG_Building.html",
height=2000,
width=800,
scrolling=True,
)
with tab_graph_view:
st.write(
"> 图谱的可视化部分采样,知识来源[银河护卫队3](https://en.wikipedia.org/wiki/Guardians_of_the_Galaxy_Vol._3)"
)
components.iframe(
src="https://www.siwei.io/demo-dumps/kg-llm/nebulagraph_draw_sample.html",
height=500,
scrolling=True,
)
with tab_cypher:
st.write("> Cypher 查询图库")
query_string = st.text_input(
label="输入查询语句", value="MATCH ()-[e]->() RETURN e LIMIT 25"
)
if st.button("> 执行"):
# run query
result = query_nebulagraph(query_string)
# convert to pandas dataframe
result_df = result_to_df(result)
# display pd dataframe
st.dataframe(result_df)
# create pyvis graph
g = create_pyvis_graph(result_df)
# render with random file name
import random
graph_html = g.generate_html(f"graph_{random.randint(0, 1000)}.html")
components.html(graph_html, height=500, scrolling=True)
with tab_nl2cypher:
st.write("> 使用自然语言查询图库")
nl_query_string = st.text_input(
label="输入自然语言问题", value="Tell me about Peter Quill?"
)
if st.button("生成 Cypher 查询语句,并执行"):
response = nl2kg_query_engine.query(nl_query_string)
graph_query = list(response.metadata.values())[0]["graph_store_query"]
graph_query = graph_query.replace("WHERE", "\n WHERE").replace(
"RETURN", "\nRETURN"
)
answer = str(response)
st.write(f"*答案*: {answer}")
st.markdown(
f"""
## 利用 LLM 生成的图查询语句
```cypher
{graph_query}
```
"""
)
st.write("## 结果可视化")
render_query = cypher_to_all_paths(graph_query)
result = query_nebulagraph(render_query)
result_df = result_to_df(result)
# create pyvis graph
g = create_pyvis_graph(result_df)
# render with random file name
graph_html = g.generate_html(f"graph_{random.randint(0, 1000)}.html")
components.html(graph_html, height=500, scrolling=True)
with tab_code_nl2cypher:
st.write("利用 Langchain 或者 Llama Index,我们可以只用几行代码就实现自然语言查询图谱(NL2NebulaCypher)")
tab_langchain, tab_llamaindex = st.tabs(["Langchain", "Llama Index"])
with tab_langchain:
st.code(body=CODE_NL2CYPHER_LANGCHAIN, language="python")
with tab_llamaindex:
st.code(body=CODE_NL2CYPHER_LLAMAINDEX, language="python")
st.markdown(
"""
## 参考文档
- [Langchain: NebulaGraphQAChain](https://python.langchain.com/docs/modules/chains/additional/graph_nebula_qa)
- [Llama Index: KnowledgeGraphQueryEngine](https://gpt-index.readthedocs.io/en/latest/examples/query_engine/knowledge_graph_query_engine.html)
"""
)
| [
"llama_index.storage.storage_context.StorageContext.from_defaults",
"llama_index.LLMPredictor",
"llama_index.graph_stores.NebulaGraphStore",
"llama_index.ServiceContext.from_defaults",
"llama_index.query_engine.KnowledgeGraphQueryEngine",
"llama_index.llms.AzureOpenAI"
] | [((12, 52), 'sys.stdout.reconfigure', 'sys.stdout.reconfigure', ([], {'encoding': '"""utf-8"""'}), "(encoding='utf-8')\n", (34, 52), False, 'import sys\n'), ((53, 92), 'sys.stdin.reconfigure', 'sys.stdin.reconfigure', ([], {'encoding': '"""utf-8"""'}), "(encoding='utf-8')\n", (74, 92), False, 'import sys\n'), ((2988, 3046), 'logging.basicConfig', 'logging.basicConfig', ([], {'stream': 'sys.stdout', 'level': 'logging.INFO'}), '(stream=sys.stdout, level=logging.INFO)\n', (3007, 3046), False, 'import logging\n'), ((3402, 3493), 'llama_index.llms.AzureOpenAI', 'AzureOpenAI', ([], {'engine': "st.secrets['DEPLOYMENT_NAME']", 'temperature': '(0)', 'model': '"""gpt-35-turbo"""'}), "(engine=st.secrets['DEPLOYMENT_NAME'], temperature=0, model=\n 'gpt-35-turbo')\n", (3413, 3493), False, 'from llama_index.llms import AzureOpenAI\n'), ((3520, 3541), 'llama_index.LLMPredictor', 'LLMPredictor', ([], {'llm': 'llm'}), '(llm=llm)\n', (3532, 3541), False, 'from llama_index import VectorStoreIndex, SimpleDirectoryReader, KnowledgeGraphIndex, LLMPredictor, ServiceContext\n'), ((4009, 4098), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'llm_predictor': 'llm_predictor', 'embed_model': 'embedding_llm'}), '(llm_predictor=llm_predictor, embed_model=\n embedding_llm)\n', (4037, 4098), False, 'from llama_index import VectorStoreIndex, SimpleDirectoryReader, KnowledgeGraphIndex, LLMPredictor, ServiceContext\n'), ((4552, 4660), 'llama_index.graph_stores.NebulaGraphStore', 'NebulaGraphStore', ([], {'space_name': 'space_name', 'edge_types': 'edge_types', 'rel_prop_names': 'rel_prop_names', 'tags': 'tags'}), '(space_name=space_name, edge_types=edge_types,\n rel_prop_names=rel_prop_names, tags=tags)\n', (4568, 4660), False, 'from llama_index.graph_stores import NebulaGraphStore\n'), ((4694, 4747), 'llama_index.storage.storage_context.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {'graph_store': 'graph_store'}), '(graph_store=graph_store)\n', (4722, 4747), False, 'from llama_index.storage.storage_context import StorageContext\n'), ((4952, 5071), 'llama_index.query_engine.KnowledgeGraphQueryEngine', 'KnowledgeGraphQueryEngine', ([], {'storage_context': 'storage_context', 'service_context': 'service_context', 'llm': 'llm', 'verbose': '(True)'}), '(storage_context=storage_context, service_context=\n service_context, llm=llm, verbose=True)\n', (4977, 5071), False, 'from llama_index.query_engine import KnowledgeGraphQueryEngine\n'), ((8530, 8558), 'streamlit.title', 'st.title', (['"""利用 LLM 构建、查询知识图谱"""'], {}), "('利用 LLM 构建、查询知识图谱')\n", (8538, 8558), True, 'import streamlit as st\n'), ((8680, 8769), 'streamlit.tabs', 'st.tabs', (["['代码:构建知识图谱', '完整 Notebook', '图谱可视化', 'Cypher 查询', '自然语言查询', '代码:NL2Cypher']"], {}), "(['代码:构建知识图谱', '完整 Notebook', '图谱可视化', 'Cypher 查询', '自然语言查询',\n '代码:NL2Cypher'])\n", (8687, 8769), True, 'import streamlit as st\n'), ((3671, 3920), 'langchain.embeddings.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {'model': '"""text-embedding-ada-002"""', 'deployment': "st.secrets['EMBEDDING_DEPLOYMENT_NAME']", 'openai_api_key': 'openai.api_key', 'openai_api_base': 'openai.api_base', 'openai_api_type': 'openai.api_type', 'openai_api_version': 'openai.api_version'}), "(model='text-embedding-ada-002', deployment=st.secrets[\n 'EMBEDDING_DEPLOYMENT_NAME'], openai_api_key=openai.api_key,\n openai_api_base=openai.api_base, openai_api_type=openai.api_type,\n openai_api_version=openai.api_version)\n", (3687, 3920), False, 'from langchain.embeddings import 
OpenAIEmbeddings\n'), ((5176, 5232), 're.findall', 're.findall', (['"""(MATCH .+?(?=MATCH|$))"""', 'query', '(re.I | re.S)'], {}), "('(MATCH .+?(?=MATCH|$))', query, re.I | re.S)\n", (5186, 5232), False, 'import re\n'), ((6408, 6423), 'pandas.DataFrame', 'pd.DataFrame', (['d'], {}), '(d)\n', (6420, 6423), True, 'import pandas as pd\n'), ((7626, 7723), 'pyvis.network.Network', 'Network', ([], {'notebook': '(True)', 'directed': '(True)', 'cdn_resources': '"""in_line"""', 'height': '"""500px"""', 'width': '"""100%"""'}), "(notebook=True, directed=True, cdn_resources='in_line', height=\n '500px', width='100%')\n", (7633, 7723), False, 'from pyvis.network import Network\n'), ((8361, 8380), 'nebula3.Config.SessionPoolConfig', 'SessionPoolConfig', ([], {}), '()\n', (8378, 8380), False, 'from nebula3.Config import SessionPoolConfig\n'), ((8400, 8458), 'nebula3.gclient.net.SessionPool.SessionPool', 'SessionPool', (['user', 'password', 'space_name', '[(address, port)]'], {}), '(user, password, space_name, [(address, port)])\n', (8411, 8458), False, 'from nebula3.gclient.net.SessionPool import SessionPool\n'), ((8850, 8881), 'streamlit.write', 'st.write', (['"""> 利用 LLM,几行代码构建知识图谱"""'], {}), "('> 利用 LLM,几行代码构建知识图谱')\n", (8858, 8881), True, 'import streamlit as st\n'), ((8886, 8932), 'streamlit.code', 'st.code', ([], {'body': 'CODE_BUILD_KG', 'language': '"""python"""'}), "(body=CODE_BUILD_KG, language='python')\n", (8893, 8932), True, 'import streamlit as st\n'), ((8957, 8990), 'streamlit.write', 'st.write', (['"""> 完整 Demo 过程 Notebook"""'], {}), "('> 完整 Demo 过程 Notebook')\n", (8965, 8990), True, 'import streamlit as st\n'), ((8995, 9313), 'streamlit.write', 'st.write', (['"""\n\n这个 Notebook 展示了如何利用 LLM 从不同类型的信息源(以维基百科为例)中抽取知识三元组,并存储到图数据库 NebulaGraph 中。\n\n本 Demo 中,我们先抽取了维基百科中关于《银河护卫队3》的信息,然后利用 LLM 生成的知识三元组,构建了一个图谱。\n然后利用 Cypher 查询图谱,最后利用 LlamaIndex 和 Langchain 中的 NL2NebulaCypher,实现了自然语言查询图谱的功能。\n\n您可以点击其他标签亲自试玩图谱的可视化、Cypher 查询、自然语言查询(NL2NebulaCypher)等功能。\n\n """'], {}), '(\n """\n\n这个 Notebook 展示了如何利用 LLM 从不同类型的信息源(以维基百科为例)中抽取知识三元组,并存储到图数据库 NebulaGraph 中。\n\n本 Demo 中,我们先抽取了维基百科中关于《银河护卫队3》的信息,然后利用 LLM 生成的知识三元组,构建了一个图谱。\n然后利用 Cypher 查询图谱,最后利用 LlamaIndex 和 Langchain 中的 NL2NebulaCypher,实现了自然语言查询图谱的功能。\n\n您可以点击其他标签亲自试玩图谱的可视化、Cypher 查询、自然语言查询(NL2NebulaCypher)等功能。\n\n """\n )\n', (9003, 9313), True, 'import streamlit as st\n'), ((9354, 9465), 'streamlit.markdown', 'st.markdown', (['"""\n这里可以[下载](https://www.siwei.io/demo-dumps/kg-llm/KG_Building.ipynb) 完整的 Notebook。\n"""'], {}), '(\n """\n这里可以[下载](https://www.siwei.io/demo-dumps/kg-llm/KG_Building.ipynb) 完整的 Notebook。\n"""\n )\n', (9365, 9465), True, 'import streamlit as st\n'), ((9475, 9604), 'streamlit.components.v1.iframe', 'components.iframe', ([], {'src': '"""https://www.siwei.io/demo-dumps/kg-llm/KG_Building.html"""', 'height': '(2000)', 'width': '(800)', 'scrolling': '(True)'}), "(src=\n 'https://www.siwei.io/demo-dumps/kg-llm/KG_Building.html', height=2000,\n width=800, scrolling=True)\n", (9492, 9604), True, 'import streamlit.components.v1 as components\n'), ((9661, 9770), 'streamlit.write', 'st.write', (['"""> 图谱的可视化部分采样,知识来源[银河护卫队3](https://en.wikipedia.org/wiki/Guardians_of_the_Galaxy_Vol._3)"""'], {}), "(\n '> 图谱的可视化部分采样,知识来源[银河护卫队3](https://en.wikipedia.org/wiki/Guardians_of_the_Galaxy_Vol._3)'\n )\n", (9669, 9770), True, 'import streamlit as st\n'), ((9780, 9909), 'streamlit.components.v1.iframe', 'components.iframe', ([], {'src': '"""https://www.siwei.io/demo-dumps/kg-llm/nebulagraph_draw_sample.html"""', 'height': '(500)', 'scrolling': 
'(True)'}), "(src=\n 'https://www.siwei.io/demo-dumps/kg-llm/nebulagraph_draw_sample.html',\n height=500, scrolling=True)\n", (9797, 9909), True, 'import streamlit.components.v1 as components\n'), ((9954, 9979), 'streamlit.write', 'st.write', (['"""> Cypher 查询图库"""'], {}), "('> Cypher 查询图库')\n", (9962, 9979), True, 'import streamlit as st\n'), ((9999, 10072), 'streamlit.text_input', 'st.text_input', ([], {'label': '"""输入查询语句"""', 'value': '"""MATCH ()-[e]->() RETURN e LIMIT 25"""'}), "(label='输入查询语句', value='MATCH ()-[e]->() RETURN e LIMIT 25')\n", (10012, 10072), True, 'import streamlit as st\n'), ((10094, 10111), 'streamlit.button', 'st.button', (['"""> 执行"""'], {}), "('> 执行')\n", (10103, 10111), True, 'import streamlit as st\n'), ((10629, 10653), 'streamlit.write', 'st.write', (['"""> 使用自然语言查询图库"""'], {}), "('> 使用自然语言查询图库')\n", (10637, 10653), True, 'import streamlit as st\n'), ((10676, 10743), 'streamlit.text_input', 'st.text_input', ([], {'label': '"""输入自然语言问题"""', 'value': '"""Tell me about Peter Quill?"""'}), "(label='输入自然语言问题', value='Tell me about Peter Quill?')\n", (10689, 10743), True, 'import streamlit as st\n'), ((10765, 10796), 'streamlit.button', 'st.button', (['"""生成 Cypher 查询语句,并执行"""'], {}), "('生成 Cypher 查询语句,并执行')\n", (10774, 10796), True, 'import streamlit as st\n'), ((11681, 11759), 'streamlit.write', 'st.write', (['"""利用 Langchain 或者 Llama Index,我们可以只用几行代码就实现自然语言查询图谱(NL2NebulaCypher)"""'], {}), "('利用 Langchain 或者 Llama Index,我们可以只用几行代码就实现自然语言查询图谱(NL2NebulaCypher)')\n", (11689, 11759), True, 'import streamlit as st\n'), ((11797, 11834), 'streamlit.tabs', 'st.tabs', (["['Langchain', 'Llama Index']"], {}), "(['Langchain', 'Llama Index'])\n", (11804, 11834), True, 'import streamlit as st\n'), ((12022, 12332), 'streamlit.markdown', 'st.markdown', (['"""\n\n## 参考文档\n \n- [Langchain: NebulaGraphQAChain](https://python.langchain.com/docs/modules/chains/additional/graph_nebula_qa)\n- [Llama Index: KnowledgeGraphQueryEngine](https://gpt-index.readthedocs.io/en/latest/examples/query_engine/knowledge_graph_query_engine.html)\n"""'], {}), '(\n """\n\n## 参考文档\n \n- [Langchain: NebulaGraphQAChain](https://python.langchain.com/docs/modules/chains/additional/graph_nebula_qa)\n- [Llama Index: KnowledgeGraphQueryEngine](https://gpt-index.readthedocs.io/en/latest/examples/query_engine/knowledge_graph_query_engine.html)\n"""\n )\n', (12033, 12332), True, 'import streamlit as st\n'), ((10302, 10325), 'streamlit.dataframe', 'st.dataframe', (['result_df'], {}), '(result_df)\n', (10314, 10325), True, 'import streamlit as st\n'), ((10548, 10603), 'streamlit.components.v1.html', 'components.html', (['graph_html'], {'height': '(500)', 'scrolling': '(True)'}), '(graph_html, height=500, scrolling=True)\n', (10563, 10603), True, 'import streamlit.components.v1 as components\n'), ((11093, 11120), 'streamlit.write', 'st.write', (['f"""*答案*: {answer}"""'], {}), "(f'*答案*: {answer}')\n", (11101, 11120), True, 'import streamlit as st\n'), ((11129, 11197), 'streamlit.markdown', 'st.markdown', (['f"""\n## 利用 LLM 生成的图查询语句\n```cypher\n{graph_query}\n```\n"""'], {}), '(f"""\n## 利用 LLM 生成的图查询语句\n```cypher\n{graph_query}\n```\n""")\n', (11140, 11197), True, 'import streamlit as st\n'), ((11228, 11248), 'streamlit.write', 'st.write', (['"""## 结果可视化"""'], {}), "('## 结果可视化')\n", (11236, 11248), True, 'import streamlit as st\n'), ((11594, 11649), 'streamlit.components.v1.html', 'components.html', (['graph_html'], {'height': '(500)', 'scrolling': '(True)'}), '(graph_html, height=500, scrolling=True)\n', (11609, 
11649), True, 'import streamlit.components.v1 as components\n'), ((11867, 11924), 'streamlit.code', 'st.code', ([], {'body': 'CODE_NL2CYPHER_LANGCHAIN', 'language': '"""python"""'}), "(body=CODE_NL2CYPHER_LANGCHAIN, language='python')\n", (11874, 11924), True, 'import streamlit as st\n'), ((11958, 12016), 'streamlit.code', 'st.code', ([], {'body': 'CODE_NL2CYPHER_LLAMAINDEX', 'language': '"""python"""'}), "(body=CODE_NL2CYPHER_LLAMAINDEX, language='python')\n", (11965, 12016), True, 'import streamlit as st\n'), ((5252, 5281), 're.search', 're.search', (['"""RETURN .+"""', 'query'], {}), "('RETURN .+', query)\n", (5261, 5281), False, 'import re\n'), ((10507, 10530), 'random.randint', 'random.randint', (['(0)', '(1000)'], {}), '(0, 1000)\n', (10521, 10530), False, 'import random\n'), ((11553, 11576), 'random.randint', 'random.randint', (['(0)', '(1000)'], {}), '(0, 1000)\n', (11567, 11576), False, 'import random\n')] |
# wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/10q/uber_10q_march_2022.pdf' -O './data/uber_10q_march_2022.pdf'
# wget "https://meetings.wmo.int/Cg-19/PublishingImages/SitePages/FINAC-43/7%20-%20EC-77-Doc%205%20Financial%20Statements%20for%202022%20(FINAC).pptx" -O './data/presentation.pptx'
import os
import nest_asyncio # noqa: E402
nest_asyncio.apply()
# bring in our LLAMA_CLOUD_API_KEY
from dotenv import load_dotenv
load_dotenv()
##### LLAMAPARSE #####
from llama_parse import LlamaParse
llamaparse_api_key = os.getenv("LLAMA_CLOUD_API_KEY")
#llama_parse_documents = LlamaParse(api_key=llamaparse_api_key, result_type="markdown").load_data("./data/presentation.pptx")
#llama_parse_documents = LlamaParse(api_key=llamaparse_api_key, result_type="markdown").load_data("./data/uber_10q_march_2022.pdf")
#llama_parse_documents = LlamaParse(api_key=llamaparse_api_key, result_type="markdown").load_data("./data/state_of_union.txt")
import pickle
# Define a function to load parsed data if available, or parse if not
def load_or_parse_data():
data_file = "./data/parsed_data.pkl"
if os.path.exists(data_file):
# Load the parsed data from the file
with open(data_file, "rb") as f:
parsed_data = pickle.load(f)
else:
# Perform the parsing step and store the result in llama_parse_documents
#llama_parse_documents = LlamaParse(api_key=llamaparse_api_key, result_type="markdown").load_data("./data/uber_10q_march_2022.pdf")
#llama_parse_documents = LlamaParse(api_key=llamaparse_api_key, result_type="markdown").load_data("./data/presentation.pptx")
llama_parse_documents = LlamaParse(api_key=llamaparse_api_key, result_type="markdown").load_data(["./data/presentation.pptx", "./data/uber_10q_march_2022.pdf"])
# Save the parsed data to a file
with open(data_file, "wb") as f:
pickle.dump(llama_parse_documents, f)
# Set the parsed data to the variable
parsed_data = llama_parse_documents
return parsed_data
# Call the function to either load or parse the data
llama_parse_documents = load_or_parse_data()
len(llama_parse_documents)
llama_parse_documents[0].text[:100]
type(llama_parse_documents)
######## QDRANT ###########
from llama_index.vector_stores.qdrant import QdrantVectorStore
from llama_index.core import VectorStoreIndex, StorageContext
import qdrant_client
qdrant_url = os.getenv("QDRANT_URL")
qdrant_api_key = os.getenv("QDRANT_API_KEY")
######### FastEmbedEmbeddings #############
# by default llamaindex uses OpenAI models
from llama_index.embeddings.fastembed import FastEmbedEmbedding
embed_model = FastEmbedEmbedding(model_name="BAAI/bge-base-en-v1.5")
""" embed_model = OllamaEmbedding(
model_name="nomic-embed-text",
#model_name="llama2",
base_url="http://localhost:11434",
ollama_additional_kwargs={"mirostat": 0},
) """
#### Setting embed_model other than OpenAI (by default, OpenAI's model is used)
from llama_index.core import Settings
Settings.embed_model = embed_model
######### Groq API ###########
from llama_index.llms.groq import Groq
groq_api_key = os.getenv("GROQ_API_KEY")
llm = Groq(model="mixtral-8x7b-32768", api_key=groq_api_key)
#llm = Groq(model="gemma-7b-it", api_key=groq_api_key)
######### Ollama ###########
#from llama_index.llms.ollama import Ollama # noqa: E402
#llm = Ollama(model="llama2", request_timeout=30.0)
#### Setting llm other than OpenAI (by default, OpenAI's model is used)
Settings.llm = llm
client = qdrant_client.QdrantClient(api_key=qdrant_api_key, url=qdrant_url,)
vector_store = QdrantVectorStore(client=client, collection_name='qdrant_rag')
storage_context = StorageContext.from_defaults(vector_store=vector_store)
index = VectorStoreIndex.from_documents(documents=llama_parse_documents, storage_context=storage_context, show_progress=True)
#### PERSIST INDEX #####
#index.storage_context.persist()
#storage_context = StorageContext.from_defaults(persist_dir="./storage")
#index = load_index_from_storage(storage_context)
# create a query engine for the index
query_engine = index.as_query_engine()
# query the engine
#query = "what is the common stock balance as of Balance as of March 31, 2022?"
query = "what is the letter of credit As of December 31, 2021 "
response = query_engine.query(query)
print(response) | [
"llama_index.core.VectorStoreIndex.from_documents",
"llama_index.core.StorageContext.from_defaults",
"llama_index.embeddings.fastembed.FastEmbedEmbedding",
"llama_index.vector_stores.qdrant.QdrantVectorStore",
"llama_index.llms.groq.Groq"
] | [((378, 398), 'nest_asyncio.apply', 'nest_asyncio.apply', ([], {}), '()\n', (396, 398), False, 'import nest_asyncio\n'), ((466, 479), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (477, 479), False, 'from dotenv import load_dotenv\n'), ((561, 593), 'os.getenv', 'os.getenv', (['"""LLAMA_CLOUD_API_KEY"""'], {}), "('LLAMA_CLOUD_API_KEY')\n", (570, 593), False, 'import os\n'), ((2478, 2501), 'os.getenv', 'os.getenv', (['"""QDRANT_URL"""'], {}), "('QDRANT_URL')\n", (2487, 2501), False, 'import os\n'), ((2519, 2546), 'os.getenv', 'os.getenv', (['"""QDRANT_API_KEY"""'], {}), "('QDRANT_API_KEY')\n", (2528, 2546), False, 'import os\n'), ((2714, 2768), 'llama_index.embeddings.fastembed.FastEmbedEmbedding', 'FastEmbedEmbedding', ([], {'model_name': '"""BAAI/bge-base-en-v1.5"""'}), "(model_name='BAAI/bge-base-en-v1.5')\n", (2732, 2768), False, 'from llama_index.embeddings.fastembed import FastEmbedEmbedding\n'), ((3196, 3221), 'os.getenv', 'os.getenv', (['"""GROQ_API_KEY"""'], {}), "('GROQ_API_KEY')\n", (3205, 3221), False, 'import os\n'), ((3229, 3283), 'llama_index.llms.groq.Groq', 'Groq', ([], {'model': '"""mixtral-8x7b-32768"""', 'api_key': 'groq_api_key'}), "(model='mixtral-8x7b-32768', api_key=groq_api_key)\n", (3233, 3283), False, 'from llama_index.llms.groq import Groq\n'), ((3579, 3645), 'qdrant_client.QdrantClient', 'qdrant_client.QdrantClient', ([], {'api_key': 'qdrant_api_key', 'url': 'qdrant_url'}), '(api_key=qdrant_api_key, url=qdrant_url)\n', (3605, 3645), False, 'import qdrant_client\n'), ((3663, 3725), 'llama_index.vector_stores.qdrant.QdrantVectorStore', 'QdrantVectorStore', ([], {'client': 'client', 'collection_name': '"""qdrant_rag"""'}), "(client=client, collection_name='qdrant_rag')\n", (3680, 3725), False, 'from llama_index.vector_stores.qdrant import QdrantVectorStore\n'), ((3744, 3799), 'llama_index.core.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {'vector_store': 'vector_store'}), '(vector_store=vector_store)\n', (3772, 3799), False, 'from llama_index.core import VectorStoreIndex, StorageContext\n'), ((3808, 3929), 'llama_index.core.VectorStoreIndex.from_documents', 'VectorStoreIndex.from_documents', ([], {'documents': 'llama_parse_documents', 'storage_context': 'storage_context', 'show_progress': '(True)'}), '(documents=llama_parse_documents,\n storage_context=storage_context, show_progress=True)\n', (3839, 3929), False, 'from llama_index.core import VectorStoreIndex, StorageContext\n'), ((1145, 1170), 'os.path.exists', 'os.path.exists', (['data_file'], {}), '(data_file)\n', (1159, 1170), False, 'import os\n'), ((1284, 1298), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (1295, 1298), False, 'import pickle\n'), ((1928, 1965), 'pickle.dump', 'pickle.dump', (['llama_parse_documents', 'f'], {}), '(llama_parse_documents, f)\n', (1939, 1965), False, 'import pickle\n'), ((1696, 1758), 'llama_parse.LlamaParse', 'LlamaParse', ([], {'api_key': 'llamaparse_api_key', 'result_type': '"""markdown"""'}), "(api_key=llamaparse_api_key, result_type='markdown')\n", (1706, 1758), False, 'from llama_parse import LlamaParse\n')] |
from llama_index.core import (
SimpleDirectoryReader,
VectorStoreIndex,
set_global_handler
)
import phoenix as px
px.launch_app()
set_global_handler("arize_phoenix")
documents = SimpleDirectoryReader('files').load_data()
index = VectorStoreIndex.from_documents(documents)
qe = index.as_query_engine()
response1 = qe.query("Tell me about ancient Rome")
response2 = qe.query("Where is the Colosseum?")
print(str(response1)+"\n"+str(response2))
# EVALUATION PART
# adapted from the examples available on the official Phoenix documentation: https://docs.arize.com/phoenix/
from phoenix.session.evaluation import (
get_qa_with_reference,
get_retrieved_documents
)
from phoenix.trace import DocumentEvaluations, SpanEvaluations
from phoenix.experimental.evals import (
HallucinationEvaluator,
QAEvaluator,
RelevanceEvaluator,
OpenAIModel,
run_evals
)
model = OpenAIModel(model="gpt-4-turbo-preview")
retrieved_documents_df = get_retrieved_documents(px.Client())
queries_df = get_qa_with_reference(px.Client())
hallucination_evaluator = HallucinationEvaluator(model)
qa_correctness_evaluator = QAEvaluator(model)
relevance_evaluator = RelevanceEvaluator(model)
hallucination_eval_df, qa_correctness_eval_df = run_evals(
dataframe=queries_df,
evaluators=[hallucination_evaluator, qa_correctness_evaluator],
provide_explanation=True,
)
relevance_eval_df = run_evals(
dataframe=retrieved_documents_df,
evaluators=[relevance_evaluator],
provide_explanation=True,
)[0]
px.Client().log_evaluations(
SpanEvaluations(
eval_name="Hallucination",
dataframe=hallucination_eval_df),
SpanEvaluations(
eval_name="QA Correctness",
dataframe=qa_correctness_eval_df),
DocumentEvaluations(
eval_name="Relevance",
dataframe=relevance_eval_df),
)
input("Press <ENTER> to exit...")
| [
"llama_index.core.VectorStoreIndex.from_documents",
"llama_index.core.SimpleDirectoryReader",
"llama_index.core.set_global_handler"
] | [((129, 144), 'phoenix.launch_app', 'px.launch_app', ([], {}), '()\n', (142, 144), True, 'import phoenix as px\n'), ((145, 180), 'llama_index.core.set_global_handler', 'set_global_handler', (['"""arize_phoenix"""'], {}), "('arize_phoenix')\n", (163, 180), False, 'from llama_index.core import SimpleDirectoryReader, VectorStoreIndex, set_global_handler\n'), ((245, 287), 'llama_index.core.VectorStoreIndex.from_documents', 'VectorStoreIndex.from_documents', (['documents'], {}), '(documents)\n', (276, 287), False, 'from llama_index.core import SimpleDirectoryReader, VectorStoreIndex, set_global_handler\n'), ((901, 941), 'phoenix.experimental.evals.OpenAIModel', 'OpenAIModel', ([], {'model': '"""gpt-4-turbo-preview"""'}), "(model='gpt-4-turbo-preview')\n", (912, 941), False, 'from phoenix.experimental.evals import HallucinationEvaluator, QAEvaluator, RelevanceEvaluator, OpenAIModel, run_evals\n'), ((1080, 1109), 'phoenix.experimental.evals.HallucinationEvaluator', 'HallucinationEvaluator', (['model'], {}), '(model)\n', (1102, 1109), False, 'from phoenix.experimental.evals import HallucinationEvaluator, QAEvaluator, RelevanceEvaluator, OpenAIModel, run_evals\n'), ((1137, 1155), 'phoenix.experimental.evals.QAEvaluator', 'QAEvaluator', (['model'], {}), '(model)\n', (1148, 1155), False, 'from phoenix.experimental.evals import HallucinationEvaluator, QAEvaluator, RelevanceEvaluator, OpenAIModel, run_evals\n'), ((1178, 1203), 'phoenix.experimental.evals.RelevanceEvaluator', 'RelevanceEvaluator', (['model'], {}), '(model)\n', (1196, 1203), False, 'from phoenix.experimental.evals import HallucinationEvaluator, QAEvaluator, RelevanceEvaluator, OpenAIModel, run_evals\n'), ((1253, 1378), 'phoenix.experimental.evals.run_evals', 'run_evals', ([], {'dataframe': 'queries_df', 'evaluators': '[hallucination_evaluator, qa_correctness_evaluator]', 'provide_explanation': '(True)'}), '(dataframe=queries_df, evaluators=[hallucination_evaluator,\n qa_correctness_evaluator], provide_explanation=True)\n', (1262, 1378), False, 'from phoenix.experimental.evals import HallucinationEvaluator, QAEvaluator, RelevanceEvaluator, OpenAIModel, run_evals\n'), ((992, 1003), 'phoenix.Client', 'px.Client', ([], {}), '()\n', (1001, 1003), True, 'import phoenix as px\n'), ((1040, 1051), 'phoenix.Client', 'px.Client', ([], {}), '()\n', (1049, 1051), True, 'import phoenix as px\n'), ((1410, 1518), 'phoenix.experimental.evals.run_evals', 'run_evals', ([], {'dataframe': 'retrieved_documents_df', 'evaluators': '[relevance_evaluator]', 'provide_explanation': '(True)'}), '(dataframe=retrieved_documents_df, evaluators=[relevance_evaluator\n ], provide_explanation=True)\n', (1419, 1518), False, 'from phoenix.experimental.evals import HallucinationEvaluator, QAEvaluator, RelevanceEvaluator, OpenAIModel, run_evals\n'), ((1566, 1641), 'phoenix.trace.SpanEvaluations', 'SpanEvaluations', ([], {'eval_name': '"""Hallucination"""', 'dataframe': 'hallucination_eval_df'}), "(eval_name='Hallucination', dataframe=hallucination_eval_df)\n", (1581, 1641), False, 'from phoenix.trace import DocumentEvaluations, SpanEvaluations\n'), ((1665, 1742), 'phoenix.trace.SpanEvaluations', 'SpanEvaluations', ([], {'eval_name': '"""QA Correctness"""', 'dataframe': 'qa_correctness_eval_df'}), "(eval_name='QA Correctness', dataframe=qa_correctness_eval_df)\n", (1680, 1742), False, 'from phoenix.trace import DocumentEvaluations, SpanEvaluations\n'), ((1766, 1837), 'phoenix.trace.DocumentEvaluations', 'DocumentEvaluations', ([], {'eval_name': '"""Relevance"""', 
'dataframe': 'relevance_eval_df'}), "(eval_name='Relevance', dataframe=relevance_eval_df)\n", (1785, 1837), False, 'from phoenix.trace import DocumentEvaluations, SpanEvaluations\n'), ((194, 224), 'llama_index.core.SimpleDirectoryReader', 'SimpleDirectoryReader', (['"""files"""'], {}), "('files')\n", (215, 224), False, 'from llama_index.core import SimpleDirectoryReader, VectorStoreIndex, set_global_handler\n'), ((1533, 1544), 'phoenix.Client', 'px.Client', ([], {}), '()\n', (1542, 1544), True, 'import phoenix as px\n')] |
import os
import time
import asyncio
from dotenv import load_dotenv
from backend.eval import (
    generate_eval_questions,
    evaluate_query_engine,
)
from backend.llm import get_llm
from backend.ingestion import (
load_markdown_docs,
create_vector_index_locally
)
from llama_index import (
download_loader,
load_index_from_storage,
Response,
ServiceContext,
set_global_service_context,
SimpleDirectoryReader,
)
from llama_index.evaluation import DatasetGenerator
from llama_index.evaluation import ResponseEvaluator
from llama_index.tools import QueryEngineTool
from llama_index.query_engine import SubQuestionQueryEngine
from llama_index.response_synthesizers import get_response_synthesizer
def run_test():
llm = get_llm(1,"gpt-3.5-turbo",0)
service_context = ServiceContext.from_defaults(llm=llm)
# set_global_service_context(service_context)
# load our documents from each folder. we keep them seperate for now, in order to create seperate indexes
getting_started_docs = load_markdown_docs("llamindex-docs-tutorial/getting_started")
gs_index = create_vector_index_locally("llamindex-docs-tutorial/getting_started",getting_started_docs)
# community_docs = load_markdown_docs("llamindex-docs-tutorial/community")
# community_index = create_vector_index_locally("llamindex-docs-tutorial/community",community_docs)
# data_docs = load_markdown_docs("llamindex-docs-tutorial/core_modules/data_modules")
# data_index = create_vector_index_locally("llamindex-docs-tutorial/core_modules/data_modules",data_docs)
# agent_docs = load_markdown_docs("llamindex-docs-tutorial/core_modules/agent_modules")
# agent_index = create_vector_index_locally("llamindex-docs-tutorial/core_modules/agent_modules",agent_docs)
# model_docs = load_markdown_docs("llamindex-docs-tutorial/core_modules/model_modules")
# model_index = create_vector_index_locally("llamindex-docs-tutorial/core_modules/model_modules",model_docs)
# query_docs = load_markdown_docs("llamindex-docs-tutorial/core_modules/query_modules")
# query_index = create_vector_index_locally("llamindex-docs-tutorial/core_modules/query_modules",query_docs)
# supporting_docs = load_markdown_docs("llamindex-docs-tutorial/core_modules/supporting_modules")
# supporting_index = create_vector_index_locally("llamindex-docs-tutorial/core_modules/supporting_modules",supporting_docs)
# tutorials_docs = load_markdown_docs("llamindex-docs-tutorial/end_to_end_tutorials")
# tutorials_index = create_vector_index_locally("llamindex-docs-tutorial/end_to_end_tutorials",tutorials_docs)
# contributing_docs = load_markdown_docs("llamindex-docs-tutorial/development")
# contributing_index = create_vector_index_locally("llamindex-docs-tutorial/development",contributing_docs)
# create a query engine tool for each index
getting_started_tool = QueryEngineTool.from_defaults(
query_engine=gs_index.as_query_engine(),
name="Getting Started",
description="Useful for answering questions about installing and running llama index, as well as basic explanations of how llama index works."
)
# community_tool = QueryEngineTool.from_defaults(
# query_engine=community_index.as_query_engine(),
# name="Community",
# description="Useful for answering questions about integrations and other apps built by the community."
# )
# data_tool = QueryEngineTool.from_defaults(
# query_engine=data_index.as_query_engine(),
# name="Data Modules",
# description="Useful for answering questions about data loaders, documents, nodes, and index structures."
# )
# agent_tool = QueryEngineTool.from_defaults(
# query_engine=agent_index.as_query_engine(),
# name="Agent Modules",
# description="Useful for answering questions about data agents, agent configurations, and tools."
# )
# model_tool = QueryEngineTool.from_defaults(
# query_engine=model_index.as_query_engine(),
# name="Model Modules",
# description="Useful for answering questions about using and configuring LLMs, embedding modles, and prompts."
# )
# query_tool = QueryEngineTool.from_defaults(
# query_engine=query_index.as_query_engine(),
# name="Query Modules",
# description="Useful for answering questions about query engines, query configurations, and using various parts of the query engine pipeline."
# )
# supporting_tool = QueryEngineTool.from_defaults(
# query_engine=supporting_index.as_query_engine(),
# name="Supporting Modules",
# description="Useful for answering questions about supporting modules, such as callbacks, service context, and avaluation."
# )
# tutorials_tool = QueryEngineTool.from_defaults(
# query_engine=tutorials_index.as_query_engine(),
# name="Tutorials",
# description="Useful for answering questions about end-to-end tutorials and giving examples of specific use-cases."
# )
# contributing_tool = QueryEngineTool.from_defaults(
# query_engine=contributing_index.as_query_engine(),
# name="Contributing",
# description="Useful for answering questions about contributing to llama index, including how to contribute to the codebase and how to build documentation."
# )
query_engine = SubQuestionQueryEngine.from_defaults(
query_engine_tools=[
getting_started_tool,
# community_tool,
# data_tool,
# agent_tool,
# model_tool,
# query_tool,
# supporting_tool,
# tutorials_tool,
# contributing_tool
],
# enable this for streaming
# response_synthesizer=get_response_synthesizer(streaming=True),
verbose=False
)
# print("=================================")
# print("EVAL QUERY ENGINE TEST:")
# response = query_engine.query("How do I install llama index?")
# print(str(response))
# print("=================================")
print("=================================")
print("GENERATE QUESTIONS")
question_dataset = generate_eval_questions("llamindex-docs-tutorial/getting_started/")
print("=================================")
print("=================================")
print("EVAL QUESTIONS")
# llm = get_llm(1,"gpt-4",0)
llm2 = get_llm(1,"gpt-3.5-turbo",0)
service_context2 = ServiceContext.from_defaults(llm=llm2)
    evaluator = ResponseEvaluator(service_context=service_context2)  # use the evaluator-specific service context
all_results = evaluate_query_engine(evaluator, query_engine, question_dataset)
print("=================================")
# total_correct, all_results = evaluate_query_engine(evaluator, query_engine, question_dataset)
# print(f"Halucination? Scored {total_correct} out of {len(question_dataset)} questions correctly.")
total_correct = 0
# for r in all_results:
# print(r)
# total_correct += 1
# print(f"Halucination? Scored {total_correct} out of {len(all_results)} questions correctly.")
if __name__ == "__main__":
run_test()
# asyncio.run(run_test()) | [
"llama_index.ServiceContext.from_defaults",
"llama_index.query_engine.SubQuestionQueryEngine.from_defaults",
"llama_index.evaluation.ResponseEvaluator"
] | [((786, 816), 'backend.llm.get_llm', 'get_llm', (['(1)', '"""gpt-3.5-turbo"""', '(0)'], {}), "(1, 'gpt-3.5-turbo', 0)\n", (793, 816), False, 'from backend.llm import get_llm\n'), ((837, 874), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'llm': 'llm'}), '(llm=llm)\n', (865, 874), False, 'from llama_index import download_loader, load_index_from_storage, Response, ServiceContext, set_global_service_context, SimpleDirectoryReader\n'), ((1063, 1124), 'backend.ingestion.load_markdown_docs', 'load_markdown_docs', (['"""llamindex-docs-tutorial/getting_started"""'], {}), "('llamindex-docs-tutorial/getting_started')\n", (1081, 1124), False, 'from backend.ingestion import load_markdown_docs, create_vector_index_locally\n'), ((1140, 1236), 'backend.ingestion.create_vector_index_locally', 'create_vector_index_locally', (['"""llamindex-docs-tutorial/getting_started"""', 'getting_started_docs'], {}), "('llamindex-docs-tutorial/getting_started',\n getting_started_docs)\n", (1167, 1236), False, 'from backend.ingestion import load_markdown_docs, create_vector_index_locally\n'), ((5464, 5563), 'llama_index.query_engine.SubQuestionQueryEngine.from_defaults', 'SubQuestionQueryEngine.from_defaults', ([], {'query_engine_tools': '[getting_started_tool]', 'verbose': '(False)'}), '(query_engine_tools=[\n getting_started_tool], verbose=False)\n', (5500, 5563), False, 'from llama_index.query_engine import SubQuestionQueryEngine\n'), ((6276, 6343), 'backend.eval.generate_eval_questions', 'generate_eval_questions', (['"""llamindex-docs-tutorial/getting_started/"""'], {}), "('llamindex-docs-tutorial/getting_started/')\n", (6299, 6343), False, 'from backend.eval import generate_eval_questions\n'), ((6512, 6542), 'backend.llm.get_llm', 'get_llm', (['(1)', '"""gpt-3.5-turbo"""', '(0)'], {}), "(1, 'gpt-3.5-turbo', 0)\n", (6519, 6542), False, 'from backend.llm import get_llm\n'), ((6564, 6602), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'llm': 'llm2'}), '(llm=llm2)\n', (6592, 6602), False, 'from llama_index import download_loader, load_index_from_storage, Response, ServiceContext, set_global_service_context, SimpleDirectoryReader\n'), ((6619, 6669), 'llama_index.evaluation.ResponseEvaluator', 'ResponseEvaluator', ([], {'service_context': 'service_context'}), '(service_context=service_context)\n', (6636, 6669), False, 'from llama_index.evaluation import ResponseEvaluator\n'), ((6688, 6752), 'backend.eval.evaluate_query_engine', 'evaluate_query_engine', (['evaluator', 'query_engine', 'question_dataset'], {}), '(evaluator, query_engine, question_dataset)\n', (6709, 6752), False, 'from backend.eval import evaluate_query_engine\n')] |
import streamlit as st
import openai
import os.path as op
from llama_index import ServiceContext
from llama_index import VectorStoreIndex,download_loader
from pathlib import Path
from llama_index.retrievers import VectorIndexRetriever
from llama_index.llms import OpenAI
# specify path to CSV file, OPENAI api_key, and model below
FILE_PATH = "../data/siriraj_doctor_details.csv"
assert op.exists(FILE_PATH), f"CSV file not found at {FILE_PATH}, please check the file path."
openai.api_key = "sk-..."
MODEL = "gpt-4"
st.set_page_config(page_title="Chatbot for doctor appointment", page_icon="🦙", layout="centered", initial_sidebar_state="auto", menu_items=None)
st.title("Chatbot for doctor appointment")
st.info("แชทบอทช่วยตอบคำถามสำหรับการนัดหมายแพทย์ที่โรงพยาบาลศิริราช ปิยมหาราชการุณย์ ดูข้อมูลแพทย์เพิ่มเติมได้ที่ https://www.siphhospital.com/th/medical-services/find-doctor", icon="📃")
system_prompt = """
Given the following doctors' data in the CSV file and their embeddings, create a response in Thai to a patient asking about scheduling an appointment,\
inquiring about the doctor's expertise, or seeking a recommendation for a doctor based on their needs. \
Note that user may inquire in a more casual text and you need to understand infer what they need before response.\
If user ask about doctor's data e.g. name, please provide information back in an easy to read format.\
Use only the data provided. The response should be in Thai and do not hallucinate. \
"""
llm = OpenAI(model=MODEL, system_prompt=system_prompt, temperature=0.3)
service_context = ServiceContext.from_defaults(llm=llm)
@st.cache_resource(show_spinner=False)
def load_data(file_path: str):
with st.spinner(text="Loading and indexing the Streamlit docs – hang tight! This should take 1-2 minutes."):
PandasCSVReader = download_loader("PandasCSVReader")
loader = PandasCSVReader()
docs = loader.load_data(file=Path(file_path))
index = VectorStoreIndex.from_documents(docs, service_context=service_context)
return index
index = load_data(FILE_PATH)
chat_engine = index.as_chat_engine(chat_mode="context")
if "messages" not in st.session_state.keys(): # Initialize the chat messages history
st.session_state.messages = [
{"role": "assistant", "content": "สอบถามข้อมูลการนัดหมายแพทย์ได้ที่นี่ครับ"}
]
if "chat_engine" not in st.session_state.keys(): # Initialize the chat engine
st.session_state.chat_engine = chat_engine
if prompt := st.chat_input("Your question"): # Prompt for user input and save to chat history
st.session_state.messages.append({"role": "user", "content": prompt})
messages = getattr(st.session_state, 'messages', [])
for message in messages: # Display the prior chat messages
with st.chat_message(message["role"]):
st.write(message["content"])
#If last message is not from assistant, generate a new response
if messages[-1]["role"] != "assistant":
with st.chat_message("assistant"):
with st.spinner("Thinking..."):
response = st.session_state.chat_engine.chat(prompt)
st.write(response.response)
message = {"role": "assistant", "content": response.response}
st.session_state.messages.append(message) # Add response to message history
| [
"llama_index.VectorStoreIndex.from_documents",
"llama_index.ServiceContext.from_defaults",
"llama_index.llms.OpenAI",
"llama_index.download_loader"
] | [((389, 409), 'os.path.exists', 'op.exists', (['FILE_PATH'], {}), '(FILE_PATH)\n', (398, 409), True, 'import os.path as op\n'), ((520, 669), 'streamlit.set_page_config', 'st.set_page_config', ([], {'page_title': '"""Chatbot for doctor appointment"""', 'page_icon': '"""🦙"""', 'layout': '"""centered"""', 'initial_sidebar_state': '"""auto"""', 'menu_items': 'None'}), "(page_title='Chatbot for doctor appointment', page_icon=\n '🦙', layout='centered', initial_sidebar_state='auto', menu_items=None)\n", (538, 669), True, 'import streamlit as st\n'), ((665, 707), 'streamlit.title', 'st.title', (['"""Chatbot for doctor appointment"""'], {}), "('Chatbot for doctor appointment')\n", (673, 707), True, 'import streamlit as st\n'), ((708, 904), 'streamlit.info', 'st.info', (['"""แชทบอทช่วยตอบคำถามสำหรับการนัดหมายแพทย์ที่โรงพยาบาลศิริราช ปิยมหาราชการุณย์ ดูข้อมูลแพทย์เพิ่มเติมได้ที่ https://www.siphhospital.com/th/medical-services/find-doctor"""'], {'icon': '"""📃"""'}), "(\n 'แชทบอทช่วยตอบคำถามสำหรับการนัดหมายแพทย์ที่โรงพยาบาลศิริราช ปิยมหาราชการุณย์ ดูข้อมูลแพทย์เพิ่มเติมได้ที่ https://www.siphhospital.com/th/medical-services/find-doctor'\n , icon='📃')\n", (715, 904), True, 'import streamlit as st\n'), ((1487, 1552), 'llama_index.llms.OpenAI', 'OpenAI', ([], {'model': 'MODEL', 'system_prompt': 'system_prompt', 'temperature': '(0.3)'}), '(model=MODEL, system_prompt=system_prompt, temperature=0.3)\n', (1493, 1552), False, 'from llama_index.llms import OpenAI\n'), ((1571, 1608), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'llm': 'llm'}), '(llm=llm)\n', (1599, 1608), False, 'from llama_index import ServiceContext\n'), ((1612, 1649), 'streamlit.cache_resource', 'st.cache_resource', ([], {'show_spinner': '(False)'}), '(show_spinner=False)\n', (1629, 1649), True, 'import streamlit as st\n'), ((2156, 2179), 'streamlit.session_state.keys', 'st.session_state.keys', ([], {}), '()\n', (2177, 2179), True, 'import streamlit as st\n'), ((2370, 2393), 'streamlit.session_state.keys', 'st.session_state.keys', ([], {}), '()\n', (2391, 2393), True, 'import streamlit as st\n'), ((2489, 2519), 'streamlit.chat_input', 'st.chat_input', (['"""Your question"""'], {}), "('Your question')\n", (2502, 2519), True, 'import streamlit as st\n'), ((2574, 2643), 'streamlit.session_state.messages.append', 'st.session_state.messages.append', (["{'role': 'user', 'content': prompt}"], {}), "({'role': 'user', 'content': prompt})\n", (2606, 2643), True, 'import streamlit as st\n'), ((1690, 1802), 'streamlit.spinner', 'st.spinner', ([], {'text': '"""Loading and indexing the Streamlit docs – hang tight! This should take 1-2 minutes."""'}), "(text=\n 'Loading and indexing the Streamlit docs – hang tight! 
This should take 1-2 minutes.'\n )\n", (1700, 1802), True, 'import streamlit as st\n'), ((1820, 1854), 'llama_index.download_loader', 'download_loader', (['"""PandasCSVReader"""'], {}), "('PandasCSVReader')\n", (1835, 1854), False, 'from llama_index import VectorStoreIndex, download_loader\n'), ((1960, 2030), 'llama_index.VectorStoreIndex.from_documents', 'VectorStoreIndex.from_documents', (['docs'], {'service_context': 'service_context'}), '(docs, service_context=service_context)\n', (1991, 2030), False, 'from llama_index import VectorStoreIndex, download_loader\n'), ((2765, 2797), 'streamlit.chat_message', 'st.chat_message', (["message['role']"], {}), "(message['role'])\n", (2780, 2797), True, 'import streamlit as st\n'), ((2807, 2835), 'streamlit.write', 'st.write', (["message['content']"], {}), "(message['content'])\n", (2815, 2835), True, 'import streamlit as st\n'), ((2950, 2978), 'streamlit.chat_message', 'st.chat_message', (['"""assistant"""'], {}), "('assistant')\n", (2965, 2978), True, 'import streamlit as st\n'), ((2993, 3018), 'streamlit.spinner', 'st.spinner', (['"""Thinking..."""'], {}), "('Thinking...')\n", (3003, 3018), True, 'import streamlit as st\n'), ((3043, 3084), 'streamlit.session_state.chat_engine.chat', 'st.session_state.chat_engine.chat', (['prompt'], {}), '(prompt)\n', (3076, 3084), True, 'import streamlit as st\n'), ((3097, 3124), 'streamlit.write', 'st.write', (['response.response'], {}), '(response.response)\n', (3105, 3124), True, 'import streamlit as st\n'), ((3211, 3252), 'streamlit.session_state.messages.append', 'st.session_state.messages.append', (['message'], {}), '(message)\n', (3243, 3252), True, 'import streamlit as st\n'), ((1927, 1942), 'pathlib.Path', 'Path', (['file_path'], {}), '(file_path)\n', (1931, 1942), False, 'from pathlib import Path\n')] |
from llama_index.core import (ServiceContext, SQLDatabase, VectorStoreIndex,
set_global_service_context)
from llama_index.core.indices.struct_store.sql_query import \
SQLTableRetrieverQueryEngine
from llama_index.core.objects import (ObjectIndex, SQLTableNodeMapping,
SQLTableSchema)
from llama_index.core.prompts.base import Prompt
from llama_index.core.prompts.prompt_type import PromptType
from llama_index.llms.langchain import LangChainLLM
from sqlalchemy import MetaData, create_engine
from typings.agent import AgentWithConfigsOutput
from typings.config import AccountSettings
from utils.model import get_llm
class SQLQueryEngine:
"""LLamaIndex SQL Query Engine for SQL datasource"""
def __init__(
self,
settings: AccountSettings,
agent_with_configs: AgentWithConfigsOutput,
uri: str,
):
self.sql_database = SQLDatabase(engine=create_engine(uri))
self.meta = MetaData()
self.meta.reflect(bind=self.sql_database.engine)
self.settings = settings
self.agent_with_configs = agent_with_configs
def run(self, query: str):
"""Run query and return result"""
obj_index = self.initialize_sql_index()
try:
query_engine = self.create_sql_query_engine(obj_index)
res = query_engine.query(query)
return res.response
except Exception as err:
print(err)
return str(err)
def initialize_sql_index(self):
"""Initialize LLamaIndex SQL index"""
table_names = self.meta.tables.keys()
table_schema_objs = [
SQLTableSchema(table_name=table_name) for table_name in table_names
]
table_node_mapping = SQLTableNodeMapping(self.sql_database)
# TODO: research about saving index
obj_index = ObjectIndex.from_objects(
table_schema_objs,
table_node_mapping,
VectorStoreIndex,
)
return obj_index
def create_query_engine(self, obj_index, template: str):
text_to_sql_prompt = Prompt(
template,
prompt_type=PromptType.TEXT_TO_SQL,
)
llm = LangChainLLM(
llm=get_llm(
self.settings,
self.agent_with_configs,
),
)
service_context = ServiceContext.from_defaults(llm=llm)
set_global_service_context(service_context)
query_engine = SQLTableRetrieverQueryEngine(
sql_database=self.sql_database,
table_retriever=obj_index.as_retriever(similarity_top_k=4),
synthesize_response=False,
text_to_sql_prompt=text_to_sql_prompt,
service_context=service_context,
)
return query_engine
def create_sql_query_engine(self, obj_index: ObjectIndex):
TEXT_TO_SQL_PROMPT_TEMPLATE = (
"Given an input question, first create a syntactically correct {dialect} "
"query to run, then look at the results of the query and return the answer. "
"You can order the results by a relevant column to return the most "
"interesting examples in the database.\n"
"Never query for all the columns from a specific table, only ask for a "
"few relevant columns given the question.\n"
"Pay attention to use only the column names that you can see in the schema "
"description. "
"Be careful to not query for columns that do not exist. "
"Pay attention to which column is in which table. "
"Also, qualify column names with the table name when needed.\n"
"Use the following format:\n"
"Question: Question here\n"
"SQLQuery: SQL Query to run\n"
"SQLResult: Result of the SQLQuery\n"
"Answer: Final answer here\n"
"Only use the tables listed below.\n"
"{schema}\n"
"Question: {query_str}\n"
"SQLQuery: "
)
return self.create_query_engine(obj_index, TEXT_TO_SQL_PROMPT_TEMPLATE)
def fix_sql_query_engine(self, obj_index: ObjectIndex, sql: str, error: str):
TEXT_TO_SQL_PROMPT_TEMPLATE = (
"Given an input question, generated {dialect} query and error in the SQL query, fix SQL"
"You can order the results by a relevant column to return the most "
"interesting examples in the database.\n"
"Never query for all the columns from a specific table, only ask for a "
"few relevant columns given the question.\n"
"Pay attention to use only the column names that you can see in the schema "
"description. "
"Be careful to not query for columns that do not exist. "
"Pay attention to which column is in which table. "
"Also, qualify column names with the table name when needed.\n"
"Use the following format:\n"
"Question: Question here\n"
"SQLError: SQL Error here\n",
"SQLGenerated: SQL that was previously run and resulted in error here\n"
"SQLQuery: SQL Query to run\n"
"SQLResult: Result of the SQLQuery\n"
"Answer: Final answer here\n"
"Only use the tables listed below.\n"
"{schema}\n"
"Question: {query_str}\n"
f"SQLError: {error}\n"
f"SQLGenerated: {sql}\n"
"SQLQuery: ",
)
return self.create_query_engine(obj_index, TEXT_TO_SQL_PROMPT_TEMPLATE)
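# Hypothetical usage sketch (the connection URI and question are illustrative only):
# engine = SQLQueryEngine(settings, agent_with_configs, "postgresql://user:pass@host/db")
# print(engine.run("How many rows does the orders table contain?"))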
| [
"llama_index.core.set_global_service_context",
"llama_index.core.objects.SQLTableNodeMapping",
"llama_index.core.objects.SQLTableSchema",
"llama_index.core.ServiceContext.from_defaults",
"llama_index.core.objects.ObjectIndex.from_objects",
"llama_index.core.prompts.base.Prompt"
] | [((1002, 1012), 'sqlalchemy.MetaData', 'MetaData', ([], {}), '()\n', (1010, 1012), False, 'from sqlalchemy import MetaData, create_engine\n'), ((1801, 1839), 'llama_index.core.objects.SQLTableNodeMapping', 'SQLTableNodeMapping', (['self.sql_database'], {}), '(self.sql_database)\n', (1820, 1839), False, 'from llama_index.core.objects import ObjectIndex, SQLTableNodeMapping, SQLTableSchema\n'), ((1906, 1991), 'llama_index.core.objects.ObjectIndex.from_objects', 'ObjectIndex.from_objects', (['table_schema_objs', 'table_node_mapping', 'VectorStoreIndex'], {}), '(table_schema_objs, table_node_mapping,\n VectorStoreIndex)\n', (1930, 1991), False, 'from llama_index.core.objects import ObjectIndex, SQLTableNodeMapping, SQLTableSchema\n'), ((2152, 2204), 'llama_index.core.prompts.base.Prompt', 'Prompt', (['template'], {'prompt_type': 'PromptType.TEXT_TO_SQL'}), '(template, prompt_type=PromptType.TEXT_TO_SQL)\n', (2158, 2204), False, 'from llama_index.core.prompts.base import Prompt\n'), ((2418, 2455), 'llama_index.core.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'llm': 'llm'}), '(llm=llm)\n', (2446, 2455), False, 'from llama_index.core import ServiceContext, SQLDatabase, VectorStoreIndex, set_global_service_context\n'), ((2465, 2508), 'llama_index.core.set_global_service_context', 'set_global_service_context', (['service_context'], {}), '(service_context)\n', (2491, 2508), False, 'from llama_index.core import ServiceContext, SQLDatabase, VectorStoreIndex, set_global_service_context\n'), ((1693, 1730), 'llama_index.core.objects.SQLTableSchema', 'SQLTableSchema', ([], {'table_name': 'table_name'}), '(table_name=table_name)\n', (1707, 1730), False, 'from llama_index.core.objects import ObjectIndex, SQLTableNodeMapping, SQLTableSchema\n'), ((962, 980), 'sqlalchemy.create_engine', 'create_engine', (['uri'], {}), '(uri)\n', (975, 980), False, 'from sqlalchemy import MetaData, create_engine\n'), ((2285, 2332), 'utils.model.get_llm', 'get_llm', (['self.settings', 'self.agent_with_configs'], {}), '(self.settings, self.agent_with_configs)\n', (2292, 2332), False, 'from utils.model import get_llm\n')] |
import os
import time
from dotenv import load_dotenv
from llama_index.agent import ReActAgent
from llama_index.tools import FunctionTool
from llama_index.callbacks import LlamaDebugHandler, CallbackManager
import subprocess
from tools.llm import MYLLM
from tools.tools import MYTOOLBOX
load_dotenv()
debug_mode = True
llama_debug = LlamaDebugHandler(print_trace_on_end=debug_mode)
callback_manager = None
if debug_mode:
callback_manager = CallbackManager(handlers=[llama_debug])
# time.sleep(3)
llm = None
# llm_model = "LOCAL_LAMA2CPP"
if __name__ == "__main__":
print("**** Hello Agents with Llamaindex ****")
# llm = MYLLM.get_llm_model()
llm = MYLLM.get_llm_model("LOCAL_LAMA2CPP", "default", 0.7)
tool1 = FunctionTool.from_defaults(fn=MYTOOLBOX.write_haiku, name="Write Haiku")
tool2 = FunctionTool.from_defaults(
fn=MYTOOLBOX.count_characters, name="Count Characters"
)
tool3 = FunctionTool.from_defaults(
fn=MYTOOLBOX.open_application, name="Open Application"
)
tool4 = FunctionTool.from_defaults(fn=MYTOOLBOX.open_url, name="Open URL")
tools_list = [tool1, tool2, tool3, tool4]
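    # The ReAct agent alternates reasoning steps with calls to the tools above;
    # the debug handler traces each step when debug_mode is enabled.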
agent = ReActAgent.from_tools(
tools=tools_list, llm=llm, verbose=True, callback_manager=callback_manager
)
res = agent.query(
"Write me a haiku about fennec and then count the characters in it"
)
# res = agent.query("Open Obsidian ain my computer")
# res = agent.query("Open the URL https://www.youtube.com/watch?v=cWc7vYjgnTs in my firefox browser")
print(res)
response = llm.complete("Hello! Can you tell me a poem about cats and dogs?")
print(response.text)
pass
| [
"llama_index.callbacks.CallbackManager",
"llama_index.tools.FunctionTool.from_defaults",
"llama_index.callbacks.LlamaDebugHandler",
"llama_index.agent.ReActAgent.from_tools"
] | [((288, 301), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (299, 301), False, 'from dotenv import load_dotenv\n'), ((336, 384), 'llama_index.callbacks.LlamaDebugHandler', 'LlamaDebugHandler', ([], {'print_trace_on_end': 'debug_mode'}), '(print_trace_on_end=debug_mode)\n', (353, 384), False, 'from llama_index.callbacks import LlamaDebugHandler, CallbackManager\n'), ((448, 487), 'llama_index.callbacks.CallbackManager', 'CallbackManager', ([], {'handlers': '[llama_debug]'}), '(handlers=[llama_debug])\n', (463, 487), False, 'from llama_index.callbacks import LlamaDebugHandler, CallbackManager\n'), ((673, 726), 'tools.llm.MYLLM.get_llm_model', 'MYLLM.get_llm_model', (['"""LOCAL_LAMA2CPP"""', '"""default"""', '(0.7)'], {}), "('LOCAL_LAMA2CPP', 'default', 0.7)\n", (692, 726), False, 'from tools.llm import MYLLM\n'), ((740, 812), 'llama_index.tools.FunctionTool.from_defaults', 'FunctionTool.from_defaults', ([], {'fn': 'MYTOOLBOX.write_haiku', 'name': '"""Write Haiku"""'}), "(fn=MYTOOLBOX.write_haiku, name='Write Haiku')\n", (766, 812), False, 'from llama_index.tools import FunctionTool\n'), ((825, 912), 'llama_index.tools.FunctionTool.from_defaults', 'FunctionTool.from_defaults', ([], {'fn': 'MYTOOLBOX.count_characters', 'name': '"""Count Characters"""'}), "(fn=MYTOOLBOX.count_characters, name=\n 'Count Characters')\n", (851, 912), False, 'from llama_index.tools import FunctionTool\n'), ((934, 1021), 'llama_index.tools.FunctionTool.from_defaults', 'FunctionTool.from_defaults', ([], {'fn': 'MYTOOLBOX.open_application', 'name': '"""Open Application"""'}), "(fn=MYTOOLBOX.open_application, name=\n 'Open Application')\n", (960, 1021), False, 'from llama_index.tools import FunctionTool\n'), ((1043, 1109), 'llama_index.tools.FunctionTool.from_defaults', 'FunctionTool.from_defaults', ([], {'fn': 'MYTOOLBOX.open_url', 'name': '"""Open URL"""'}), "(fn=MYTOOLBOX.open_url, name='Open URL')\n", (1069, 1109), False, 'from llama_index.tools import FunctionTool\n'), ((1169, 1270), 'llama_index.agent.ReActAgent.from_tools', 'ReActAgent.from_tools', ([], {'tools': 'tools_list', 'llm': 'llm', 'verbose': '(True)', 'callback_manager': 'callback_manager'}), '(tools=tools_list, llm=llm, verbose=True,\n callback_manager=callback_manager)\n', (1190, 1270), False, 'from llama_index.agent import ReActAgent\n')] |
from llama_index.core import Settings, Document, VectorStoreIndex
from llama_index.core.node_parser import SentenceWindowNodeParser
doc = Document(
text="Sentence 1. Sentence 2. Sentence 3."
)
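# Each node keeps its neighbouring sentences (up to 2 on each side) in the
# "ContextWindow" metadata key and the original sentence in "node_text".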
text_splitter = SentenceWindowNodeParser.from_defaults(
    window_size=2,
window_metadata_key="ContextWindow",
original_text_metadata_key="node_text"
)
Settings.text_splitter = text_splitter
index = VectorStoreIndex.from_documents([doc])
retriever = index.as_retriever(similarity_top_k=1)
response = retriever.retrieve("Display the second sentence")
print(response[0].node.metadata['node_text'])
| [
"llama_index.core.VectorStoreIndex.from_documents",
"llama_index.core.node_parser.SentenceWindowNodeParser.from_defaults",
"llama_index.core.Document"
] | [((138, 190), 'llama_index.core.Document', 'Document', ([], {'text': '"""Sentence 1. Sentence 2. Sentence 3."""'}), "(text='Sentence 1. Sentence 2. Sentence 3.')\n", (146, 190), False, 'from llama_index.core import Settings, Document, VectorStoreIndex\n'), ((213, 348), 'llama_index.core.node_parser.SentenceWindowNodeParser.from_defaults', 'SentenceWindowNodeParser.from_defaults', ([], {'window_size': '(2)', 'window_metadata_key': '"""ContextWindow"""', 'original_text_metadata_key': '"""node_text"""'}), "(window_size=2, window_metadata_key=\n 'ContextWindow', original_text_metadata_key='node_text')\n", (251, 348), False, 'from llama_index.core.node_parser import SentenceWindowNodeParser\n'), ((408, 446), 'llama_index.core.VectorStoreIndex.from_documents', 'VectorStoreIndex.from_documents', (['[doc]'], {}), '([doc])\n', (439, 446), False, 'from llama_index.core import Settings, Document, VectorStoreIndex\n')] |
from key1 import KEY
from llama_index import GPTVectorStoreIndex, SimpleDirectoryReader
from llama_index import download_loader, StorageContext
import os
import openai
os.environ['OPENAI_API_KEY'] = KEY
def create_vector(dir):
documents = SimpleDirectoryReader("data").load_data()
index = GPTVectorStoreIndex.from_documents(documents)
storage_context = StorageContext.from_defaults()
index.storage_context.persist(f"./vector/{dir}")
print ("Done")
'''
def create_vector():
documents = SimpleDirectoryReader("data").load_data()
index = GPTVectorStoreIndex([])
for doc in documents:
index.insert(doc)
storage_context = StorageContext.from_defaults()
print (dir(index.storage_context.index_store))
index.storage_context.persist("./vectordatabase")
print ("Done")
'''
| [
"llama_index.SimpleDirectoryReader",
"llama_index.GPTVectorStoreIndex.from_documents",
"llama_index.StorageContext.from_defaults"
] | [((302, 347), 'llama_index.GPTVectorStoreIndex.from_documents', 'GPTVectorStoreIndex.from_documents', (['documents'], {}), '(documents)\n', (336, 347), False, 'from llama_index import GPTVectorStoreIndex, SimpleDirectoryReader\n'), ((371, 401), 'llama_index.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {}), '()\n', (399, 401), False, 'from llama_index import download_loader, StorageContext\n'), ((248, 277), 'llama_index.SimpleDirectoryReader', 'SimpleDirectoryReader', (['"""data"""'], {}), "('data')\n", (269, 277), False, 'from llama_index import GPTVectorStoreIndex, SimpleDirectoryReader\n')] |
import asyncio
from llama_index.core import KeywordTableIndex
from llama_index.core import SimpleDirectoryReader
async def retrieve(retriever, query, label):
response = await retriever.aretrieve(query)
print(f"{label} retrieved {str(len(response))} nodes")
async def main():
reader = SimpleDirectoryReader('files')
documents = reader.load_data()
index = KeywordTableIndex.from_documents(documents)
retriever1 = index.as_retriever(
retriever_mode='default'
)
retriever2 = index.as_retriever(
retriever_mode='simple'
)
query = "Where is the Colosseum?"
await asyncio.gather(
retrieve(retriever1, query, '<llm>'),
retrieve(retriever2, query, '<simple>')
)
asyncio.run(main())
| [
"llama_index.core.SimpleDirectoryReader",
"llama_index.core.KeywordTableIndex.from_documents"
] | [((298, 328), 'llama_index.core.SimpleDirectoryReader', 'SimpleDirectoryReader', (['"""files"""'], {}), "('files')\n", (319, 328), False, 'from llama_index.core import SimpleDirectoryReader\n'), ((376, 419), 'llama_index.core.KeywordTableIndex.from_documents', 'KeywordTableIndex.from_documents', (['documents'], {}), '(documents)\n', (408, 419), False, 'from llama_index.core import KeywordTableIndex\n')] |
import json
import logging
import streamlit as st
import sys
import os
from dotenv import load_dotenv
from llama_index.chat_engine.types import ChatMode
from llama_index.llms import ChatMessage
from libs.llama_utils import get_llama_memary_index, get_llama_store_index, create_document_index_by_texts, \
create_document_index_by_files, query_knowledge_data
from libs.session import PageSessionState
from llama_index.tools import QueryEngineTool, ToolMetadata, RetrieverTool
from llama_index.agent import OpenAIAgent
from libs.prompts import get_content_from
#
# logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
# logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))
sys.path.append(os.path.abspath('..'))
load_dotenv()
def get_ragsbot_page(botname):
page_state = PageSessionState(botname)
    # st.sidebar.markdown("# 💡Python Programming Mentor")
    # Stores the conversation history; the first entry is the welcome message
    page_state.initn_attr("messages", [])
    # Marks whether the last user message has been processed
    page_state.initn_attr("last_user_msg_processed", True)
    # Marks whether the streaming output has finished
    page_state.initn_attr("streaming_end", True)
resp = query_knowledge_data("Radius", "radiusrfc.chroma.db", "radiusrfc")
if resp:
for r in resp:
st.write(r.get_text())
st.write(r.get_embedding())
def end_chat_streaming():
"""当停止按钮被点击时执行,用于修改处理标志"""
page_state.streaming_end = True
page_state.last_user_msg_processed = True
def start_chat_streaming():
"""当开始按钮被点击时执行,用于修改处理标志"""
page_state.streaming_end = False
page_state.last_user_msg_processed = False
def on_input_prompt(iprompt: str):
if iprompt.strip() == "":
return
page_state.chat_prompt = iprompt
start_chat_streaming()
page_state.add_chat_msg("messages", {"role": "user", "content": page_state.chat_prompt})
with st.chat_message("user"):
st.write(page_state.chat_prompt)
    # Upload text-based files for indexing
files = st.sidebar.file_uploader("上传文件", accept_multiple_files=True)
if st.sidebar.button("索引文件"):
if files:
create_document_index_by_files(files, "ragsbot.chroma.db", "default_collection")
page_state.rags_load = True
for msg in page_state.messages:
with st.chat_message(msg["role"]):
st.write(msg["content"])
if not page_state.rags_load:
st.warning("请上传文件")
st.stop()
rindex = get_llama_store_index("ragsbot.chroma.db", "default_collection")
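    # CONTEXT chat mode retrieves relevant chunks from the index on every turn
    # and injects them into the prompt below via {context_str}.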
agent = rindex.as_chat_engine(
chat_mode=ChatMode.CONTEXT,
context_prompt="""
"You are a chatbot, able to have normal interactions, as well as talk"
" about radius tech"
"Here are the relevant documents for the context:\n"
"{context_str}"
"\nInstruction: Use the previous chat history, or the context above, to interact and help the user."
""",
verbose=True
)
    # User input
if not page_state.last_user_msg_processed:
st.chat_input("请等待上一条消息处理完毕", disabled=True)
else:
if prompt := st.chat_input("输入你的问题"):
on_input_prompt(prompt)
stop_action = st.sidebar.empty()
if not page_state.streaming_end:
stop_action.button('停止输出', on_click=end_chat_streaming, help="点击此按钮停止流式输出")
    # Respond to the user when the last message is not from the assistant and the last user message has not been processed yet
if (page_state.messages
and page_state.messages[-1]["role"] != "assistant"
and not page_state.last_user_msg_processed):
with st.chat_message("assistant"):
with st.spinner("Thinking..."):
messages = [ChatMessage(role=m["role"], content=m["content"]) for m in page_state.messages[-10:-1]]
response = agent.stream_chat(
page_state.chat_prompt,
chat_history=messages,
)
                # Stream the response
placeholder = st.empty()
full_response = ''
page_state.add_chat_msg("messages", {"role": "assistant", "content": ""})
for token in response.response_gen:
                    # If the user manually stopped streaming, exit the loop
if page_state.streaming_end:
break
if token is not None:
full_response += token
placeholder.markdown(full_response)
page_state.update_last_msg("messages", {"role": "assistant", "content": full_response})
placeholder.markdown(full_response)
stop_action.empty()
end_chat_streaming()
st.sidebar.download_button('导出对话历史',
data=json.dumps(page_state.messages, ensure_ascii=False),
file_name="chat_history.json", mime="application/json")
if st.sidebar.button('清除对话历史'):
page_state.messages = []
st.rerun()
| [
"llama_index.llms.ChatMessage"
] | [((742, 755), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (753, 755), False, 'from dotenv import load_dotenv\n'), ((719, 740), 'os.path.abspath', 'os.path.abspath', (['""".."""'], {}), "('..')\n", (734, 740), False, 'import os\n'), ((806, 831), 'libs.session.PageSessionState', 'PageSessionState', (['botname'], {}), '(botname)\n', (822, 831), False, 'from libs.session import PageSessionState\n'), ((1107, 1173), 'libs.llama_utils.query_knowledge_data', 'query_knowledge_data', (['"""Radius"""', '"""radiusrfc.chroma.db"""', '"""radiusrfc"""'], {}), "('Radius', 'radiusrfc.chroma.db', 'radiusrfc')\n", (1127, 1173), False, 'from libs.llama_utils import get_llama_memary_index, get_llama_store_index, create_document_index_by_texts, create_document_index_by_files, query_knowledge_data\n'), ((1974, 2034), 'streamlit.sidebar.file_uploader', 'st.sidebar.file_uploader', (['"""上传文件"""'], {'accept_multiple_files': '(True)'}), "('上传文件', accept_multiple_files=True)\n", (1998, 2034), True, 'import streamlit as st\n'), ((2043, 2068), 'streamlit.sidebar.button', 'st.sidebar.button', (['"""索引文件"""'], {}), "('索引文件')\n", (2060, 2068), True, 'import streamlit as st\n'), ((2432, 2496), 'libs.llama_utils.get_llama_store_index', 'get_llama_store_index', (['"""ragsbot.chroma.db"""', '"""default_collection"""'], {}), "('ragsbot.chroma.db', 'default_collection')\n", (2453, 2496), False, 'from libs.llama_utils import get_llama_memary_index, get_llama_store_index, create_document_index_by_texts, create_document_index_by_files, query_knowledge_data\n'), ((3159, 3177), 'streamlit.sidebar.empty', 'st.sidebar.empty', ([], {}), '()\n', (3175, 3177), True, 'import streamlit as st\n'), ((4807, 4834), 'streamlit.sidebar.button', 'st.sidebar.button', (['"""清除对话历史"""'], {}), "('清除对话历史')\n", (4824, 4834), True, 'import streamlit as st\n'), ((2380, 2399), 'streamlit.warning', 'st.warning', (['"""请上传文件"""'], {}), "('请上传文件')\n", (2390, 2399), True, 'import streamlit as st\n'), ((2408, 2417), 'streamlit.stop', 'st.stop', ([], {}), '()\n', (2415, 2417), True, 'import streamlit as st\n'), ((3003, 3047), 'streamlit.chat_input', 'st.chat_input', (['"""请等待上一条消息处理完毕"""'], {'disabled': '(True)'}), "('请等待上一条消息处理完毕', disabled=True)\n", (3016, 3047), True, 'import streamlit as st\n'), ((4877, 4887), 'streamlit.rerun', 'st.rerun', ([], {}), '()\n', (4885, 4887), True, 'import streamlit as st\n'), ((1877, 1900), 'streamlit.chat_message', 'st.chat_message', (['"""user"""'], {}), "('user')\n", (1892, 1900), True, 'import streamlit as st\n'), ((1914, 1946), 'streamlit.write', 'st.write', (['page_state.chat_prompt'], {}), '(page_state.chat_prompt)\n', (1922, 1946), True, 'import streamlit as st\n'), ((2100, 2185), 'libs.llama_utils.create_document_index_by_files', 'create_document_index_by_files', (['files', '"""ragsbot.chroma.db"""', '"""default_collection"""'], {}), "(files, 'ragsbot.chroma.db', 'default_collection'\n )\n", (2130, 2185), False, 'from libs.llama_utils import get_llama_memary_index, get_llama_store_index, create_document_index_by_texts, create_document_index_by_files, query_knowledge_data\n'), ((2271, 2299), 'streamlit.chat_message', 'st.chat_message', (["msg['role']"], {}), "(msg['role'])\n", (2286, 2299), True, 'import streamlit as st\n'), ((2313, 2337), 'streamlit.write', 'st.write', (["msg['content']"], {}), "(msg['content'])\n", (2321, 2337), True, 'import streamlit as st\n'), ((3079, 3102), 'streamlit.chat_input', 'st.chat_input', (['"""输入你的问题"""'], {}), "('输入你的问题')\n", (3092, 3102), True, 'import streamlit as 
st\n'), ((3505, 3533), 'streamlit.chat_message', 'st.chat_message', (['"""assistant"""'], {}), "('assistant')\n", (3520, 3533), True, 'import streamlit as st\n'), ((4659, 4710), 'json.dumps', 'json.dumps', (['page_state.messages'], {'ensure_ascii': '(False)'}), '(page_state.messages, ensure_ascii=False)\n', (4669, 4710), False, 'import json\n'), ((3552, 3577), 'streamlit.spinner', 'st.spinner', (['"""Thinking..."""'], {}), "('Thinking...')\n", (3562, 3577), True, 'import streamlit as st\n'), ((3899, 3909), 'streamlit.empty', 'st.empty', ([], {}), '()\n', (3907, 3909), True, 'import streamlit as st\n'), ((3607, 3656), 'llama_index.llms.ChatMessage', 'ChatMessage', ([], {'role': "m['role']", 'content': "m['content']"}), "(role=m['role'], content=m['content'])\n", (3618, 3656), False, 'from llama_index.llms import ChatMessage\n')] |
"""Common settings for RAG model"""
from llama_index.core import Settings
from llama_index.llms.openai import OpenAI
from llama_index.embeddings.openai import OpenAIEmbedding
from app.logger.logger import custom_logger
def settings():
"""Set the settings for RAG."""
Settings.llm = OpenAI(model="gpt-3.5-turbo-1106", temperature=0.0)
Settings.embed_model = OpenAIEmbedding(
model="text-embedding-3-small", embed_batch_size=100
)
Settings.context_window = 16000
Settings.num_output = 2048
custom_logger.info("Settings are set")
| [
"llama_index.llms.openai.OpenAI",
"llama_index.embeddings.openai.OpenAIEmbedding"
] | [((294, 345), 'llama_index.llms.openai.OpenAI', 'OpenAI', ([], {'model': '"""gpt-3.5-turbo-1106"""', 'temperature': '(0.0)'}), "(model='gpt-3.5-turbo-1106', temperature=0.0)\n", (300, 345), False, 'from llama_index.llms.openai import OpenAI\n'), ((373, 442), 'llama_index.embeddings.openai.OpenAIEmbedding', 'OpenAIEmbedding', ([], {'model': '"""text-embedding-3-small"""', 'embed_batch_size': '(100)'}), "(model='text-embedding-3-small', embed_batch_size=100)\n", (388, 442), False, 'from llama_index.embeddings.openai import OpenAIEmbedding\n'), ((529, 567), 'app.logger.logger.custom_logger.info', 'custom_logger.info', (['"""Settings are set"""'], {}), "('Settings are set')\n", (547, 567), False, 'from app.logger.logger import custom_logger\n')] |
from src._classinits import *
from src._errorsinit import *
from src._readapitokens import *
from src._wikidatasearch import *
from llama_index import LLMPredictor,GPTSimpleVectorIndex,PromptHelper,download_loader
from llama_index import GPTSimpleKeywordTableIndex,GPTListIndex
from llama_index.indices.composability import ComposableGraph
from langchain.chat_models import ChatOpenAI
from llama_index.node_parser import SimpleNodeParser
from llama_index import ServiceContext
from warnings import filterwarnings
import gradio as gr
import os
def IGNOREWARNINGOUTPUT()->RESPONSES:
filterwarnings("ignore",category=DeprecationWarning)
filterwarnings("ignore",category=UserWarning)
class MODELRUN(object):
def __init__(self)->CLASSINIT:
self.__api = READAPITOKEN()
self.__api._TOKEN()
self.__base = os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))),
"data")
self.__dir = self.__base+"/maindatapsy.csv"
self.__vec = self.__base+"/"
os.environ["OPENAI_API_KEY"] = self.__api.token
self.__rcs = download_loader("SimpleCSVReader")
self.__ndd = SimpleNodeParser()
def __str__(self)->str:
return "MODEL RUN PARAMETERS - SUBPROCESS"
def __call__(self)->None:
return None
def __getstate__(self)->CLASSINIT:
raise TypeError("[DENIED - PERMISSION]")
def __repr__(self)->str:
return MODELRUN.__doc__
def _NODEDOCUMENT(self)->RESPONSES:
loader = self.__rcs()
doc = loader.load_data(file=self.__dir)
nod = self.__ndd.get_nodes_from_documents(doc)
return nod
def _GETPREDICTOR(self,
tem:int=0.1,
mdn:str="gpt-3.5-turbo",
mtk:int=4096)->RESPONSES:
return LLMPredictor(llm=ChatOpenAI(temperature=tem,
model_name=mdn,
max_tokens=mtk))
def _GETPROMPT(self,
mx:int=4096,
ou:int=4096,
ck:int=600,
mc:int=20)->RESPONSES:
return PromptHelper(max_input_size=mx,
chunk_size_limit=ck,
num_output=ou,
max_chunk_overlap=mc)
def _GETVECTOR(self,
dct:str or list or tuple or classmethod,
ctx:classmethod,
fln:str="respsyvec.json")->RESPONSES:
try:
smp = GPTSimpleVectorIndex.from_documents(dct,
service_context=ctx)
smp.save_to_disk(self.__vec+fln)
return smp
except Exception as err:
print(str(err))
def _GETWIKI(self,
ctx:classmethod,
fln:str="reswkkvec.json")->RESPONSES:
try:
__wkk = WIKIDATASEARCH()
__wkk._SEARCH()
smp = GPTSimpleVectorIndex.from_documents(__wkk.targetcontent,
service_context=ctx)
smp.save_to_disk(self.__vec+fln)
return smp
except Exception as err:
print(str(err))
def _SERVICE(self,pred:classmethod,prom:classmethod)->RESPONSES:
return ServiceContext.from_defaults(llm_predictor=pred,
prompt_helper=prom)
def _PREMODELPROCESS(self):
# main structure for parameters
md_ = self._GETPREDICTOR()
pr_ = self._GETPROMPT()
nd_ = self._NODEDOCUMENT()
sr_ = self._SERVICE(md_,pr_)
vc_ = self._GETVECTOR(nd_,sr_)
wc_ = self._GETWIKI(sr_)
def _LOAD(self,
fln:str="respsyvec.json",
wln:str="reswkkvec.json")->RESPONSES:
try:
if os.path.exists(self.__vec+fln) and os.path.exists(self.__vec+wln):
ix = GPTSimpleVectorIndex.load_from_disk(self.__vec+fln)
iw = GPTSimpleVectorIndex.load_from_disk(self.__vec+wln)
return ix,iw
else:
FILEERROR().print()
except Exception as err:
print(str(err))
def _LAUNCH(self,
fln:str="respsyvec.json",
wln:str="reswkkvec.json")->RESULTS:
#control
if not os.path.exists(self.__vec+fln) and not os.path.exists(self.__vec+wln):
self._PREMODELPROCESS()
else:
pass
#loading modules
ix,iw = self._LOAD()
return ix,iw
| [
"llama_index.download_loader",
"llama_index.ServiceContext.from_defaults",
"llama_index.GPTSimpleVectorIndex.from_documents",
"llama_index.node_parser.SimpleNodeParser",
"llama_index.PromptHelper",
"llama_index.GPTSimpleVectorIndex.load_from_disk"
] | [((601, 654), 'warnings.filterwarnings', 'filterwarnings', (['"""ignore"""'], {'category': 'DeprecationWarning'}), "('ignore', category=DeprecationWarning)\n", (615, 654), False, 'from warnings import filterwarnings\n'), ((659, 705), 'warnings.filterwarnings', 'filterwarnings', (['"""ignore"""'], {'category': 'UserWarning'}), "('ignore', category=UserWarning)\n", (673, 705), False, 'from warnings import filterwarnings\n'), ((1147, 1181), 'llama_index.download_loader', 'download_loader', (['"""SimpleCSVReader"""'], {}), "('SimpleCSVReader')\n", (1162, 1181), False, 'from llama_index import LLMPredictor, GPTSimpleVectorIndex, PromptHelper, download_loader\n'), ((1204, 1222), 'llama_index.node_parser.SimpleNodeParser', 'SimpleNodeParser', ([], {}), '()\n', (1220, 1222), False, 'from llama_index.node_parser import SimpleNodeParser\n'), ((2233, 2326), 'llama_index.PromptHelper', 'PromptHelper', ([], {'max_input_size': 'mx', 'chunk_size_limit': 'ck', 'num_output': 'ou', 'max_chunk_overlap': 'mc'}), '(max_input_size=mx, chunk_size_limit=ck, num_output=ou,\n max_chunk_overlap=mc)\n', (2245, 2326), False, 'from llama_index import LLMPredictor, GPTSimpleVectorIndex, PromptHelper, download_loader\n'), ((3442, 3510), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'llm_predictor': 'pred', 'prompt_helper': 'prom'}), '(llm_predictor=pred, prompt_helper=prom)\n', (3470, 3510), False, 'from llama_index import ServiceContext\n'), ((2625, 2686), 'llama_index.GPTSimpleVectorIndex.from_documents', 'GPTSimpleVectorIndex.from_documents', (['dct'], {'service_context': 'ctx'}), '(dct, service_context=ctx)\n', (2660, 2686), False, 'from llama_index import LLMPredictor, GPTSimpleVectorIndex, PromptHelper, download_loader\n'), ((3090, 3167), 'llama_index.GPTSimpleVectorIndex.from_documents', 'GPTSimpleVectorIndex.from_documents', (['__wkk.targetcontent'], {'service_context': 'ctx'}), '(__wkk.targetcontent, service_context=ctx)\n', (3125, 3167), False, 'from llama_index import LLMPredictor, GPTSimpleVectorIndex, PromptHelper, download_loader\n'), ((1901, 1960), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'temperature': 'tem', 'model_name': 'mdn', 'max_tokens': 'mtk'}), '(temperature=tem, model_name=mdn, max_tokens=mtk)\n', (1911, 1960), False, 'from langchain.chat_models import ChatOpenAI\n'), ((3992, 4024), 'os.path.exists', 'os.path.exists', (['(self.__vec + fln)'], {}), '(self.__vec + fln)\n', (4006, 4024), False, 'import os\n'), ((4027, 4059), 'os.path.exists', 'os.path.exists', (['(self.__vec + wln)'], {}), '(self.__vec + wln)\n', (4041, 4059), False, 'import os\n'), ((4081, 4134), 'llama_index.GPTSimpleVectorIndex.load_from_disk', 'GPTSimpleVectorIndex.load_from_disk', (['(self.__vec + fln)'], {}), '(self.__vec + fln)\n', (4116, 4134), False, 'from llama_index import LLMPredictor, GPTSimpleVectorIndex, PromptHelper, download_loader\n'), ((4155, 4208), 'llama_index.GPTSimpleVectorIndex.load_from_disk', 'GPTSimpleVectorIndex.load_from_disk', (['(self.__vec + wln)'], {}), '(self.__vec + wln)\n', (4190, 4208), False, 'from llama_index import LLMPredictor, GPTSimpleVectorIndex, PromptHelper, download_loader\n'), ((4509, 4541), 'os.path.exists', 'os.path.exists', (['(self.__vec + fln)'], {}), '(self.__vec + fln)\n', (4523, 4541), False, 'import os\n'), ((4548, 4580), 'os.path.exists', 'os.path.exists', (['(self.__vec + wln)'], {}), '(self.__vec + wln)\n', (4562, 4580), False, 'import os\n'), ((904, 929), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), 
'(__file__)\n', (919, 929), False, 'import os\n')] |
import tiktoken
from llama_index.core import MockEmbedding, VectorStoreIndex, SimpleDirectoryReader, Settings
from llama_index.core.callbacks import CallbackManager, TokenCountingHandler
from llama_index.core.llms.mock import MockLLM
embed_model = MockEmbedding(embed_dim=1536)
llm = MockLLM(max_tokens=256)
token_counter = TokenCountingHandler(
tokenizer=tiktoken.encoding_for_model("gpt-3.5-turbo").encode
)
callback_manager = CallbackManager([token_counter])
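# Route all LLM and embedding calls through the mock models so token usage can
# be counted without spending real API credits.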
Settings.embed_model = embed_model
Settings.llm = llm
Settings.callback_manager = callback_manager
documents = SimpleDirectoryReader("cost_prediction_samples").load_data()
index = VectorStoreIndex.from_documents(
documents=documents,
show_progress=True)
print("Embedding Token Count:", token_counter.total_embedding_token_count)
query_engine = index.as_query_engine()
response = query_engine.query("What's the cat's name?")
print("Query LLM Token Count:", token_counter.total_llm_token_count)
print("Query Embedding Token Count:",token_counter.total_embedding_token_count)
| [
"llama_index.core.VectorStoreIndex.from_documents",
"llama_index.core.llms.mock.MockLLM",
"llama_index.core.callbacks.CallbackManager",
"llama_index.core.SimpleDirectoryReader",
"llama_index.core.MockEmbedding"
] | [((249, 278), 'llama_index.core.MockEmbedding', 'MockEmbedding', ([], {'embed_dim': '(1536)'}), '(embed_dim=1536)\n', (262, 278), False, 'from llama_index.core import MockEmbedding, VectorStoreIndex, SimpleDirectoryReader, Settings\n'), ((285, 308), 'llama_index.core.llms.mock.MockLLM', 'MockLLM', ([], {'max_tokens': '(256)'}), '(max_tokens=256)\n', (292, 308), False, 'from llama_index.core.llms.mock import MockLLM\n'), ((434, 466), 'llama_index.core.callbacks.CallbackManager', 'CallbackManager', (['[token_counter]'], {}), '([token_counter])\n', (449, 466), False, 'from llama_index.core.callbacks import CallbackManager, TokenCountingHandler\n'), ((643, 715), 'llama_index.core.VectorStoreIndex.from_documents', 'VectorStoreIndex.from_documents', ([], {'documents': 'documents', 'show_progress': '(True)'}), '(documents=documents, show_progress=True)\n', (674, 715), False, 'from llama_index.core import MockEmbedding, VectorStoreIndex, SimpleDirectoryReader, Settings\n'), ((574, 622), 'llama_index.core.SimpleDirectoryReader', 'SimpleDirectoryReader', (['"""cost_prediction_samples"""'], {}), "('cost_prediction_samples')\n", (595, 622), False, 'from llama_index.core import MockEmbedding, VectorStoreIndex, SimpleDirectoryReader, Settings\n'), ((361, 405), 'tiktoken.encoding_for_model', 'tiktoken.encoding_for_model', (['"""gpt-3.5-turbo"""'], {}), "('gpt-3.5-turbo')\n", (388, 405), False, 'import tiktoken\n')] |
#!/usr/bin/env python3
##
# Chat with PDF document using OpenAI, LlamaIndex, and Gradio
# github.com/deadbits
##
import os
import sys
import logging
import argparse
import openai
import gradio as gr
import urllib.request
from pathlib import Path
from langchain import OpenAI
from llama_index import GPTSimpleVectorIndex
from llama_index import LLMPredictor
from llama_index import ServiceContext
from llama_index import download_loader
# disable some of the more verbose llama_index logging
logging.basicConfig(level=logging.CRITICAL)
# change this to your preferred model
MODEL_NAME = 'gpt-3.5-turbo'
def download_pdf(url, output_path):
urllib.request.urlretrieve(url, output_path)
def load_document(fpath):
print('[status] loading document ({})'.format(fpath))
PDFReader = download_loader('PDFReader')
loader = PDFReader()
docs = loader.load_data(file=Path(fpath))
return docs
def answer_question(url='', fpath='', question='', api_key=''):
if url.strip() == '' and fpath is None:
return '[error] file and url cannot both be empty'
if url.strip() != '' and fpath is not None:
return '[error] file and url cannot both be provided'
if question.strip() == '':
return '[error] question cannot be empty'
if api_key.strip() == '':
return '[error] OpenAI API key cannot be empty'
if url.strip() != '':
download_pdf(url, 'corpus.pdf')
fpath = 'corpus.pdf'
elif fpath != '':
fname = fpath.name
os.rename(fname, 'corpus.pdf')
docs = load_document('corpus.pdf')
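    # Build the predictor and service context used to embed and query the PDF;
    # chunk_size_limit caps each indexed text chunk at 1024 tokens.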
llm = LLMPredictor(llm=OpenAI(openai_api_key=api_key, temperature=0, model_name=MODEL_NAME))
ctx = ServiceContext.from_defaults(llm_predictor=llm, chunk_size_limit=1024)
index = GPTSimpleVectorIndex.from_documents(docs, service_context=ctx)
response = index.query(question)
response = str(response)
if response.startswith('\n'):
response = response[1:]
return response
title = 'PDF Chat with OpenAI'
description = """Upload local PDF document or enter URL to PDF"""
with gr.Blocks() as demo:
gr.Markdown(f'<center><h1>{title}</h1></center>')
gr.Markdown(description)
with gr.Row():
with gr.Group():
openai_api_key = gr.Textbox(label='OpenAI API key')
url = gr.Textbox(label='PDF URL to download')
gr.Markdown("<center><h4>OR<h4></center>")
fpath = gr.File(label='Upload PDF', file_types=['.pdf'])
question = gr.Textbox(label='User prompt')
btn = gr.Button(value='Submit')
btn.style(full_width=True)
with gr.Group():
answer = gr.Textbox(label='Response:', lines=15, placeholder='Output')
btn.click(answer_question, inputs=[url, fpath, question, openai_api_key], outputs=[answer])
demo.launch(share=True)
| [
"llama_index.ServiceContext.from_defaults",
"llama_index.GPTSimpleVectorIndex.from_documents",
"llama_index.download_loader"
] | [((496, 539), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.CRITICAL'}), '(level=logging.CRITICAL)\n', (515, 539), False, 'import logging\n'), ((797, 825), 'llama_index.download_loader', 'download_loader', (['"""PDFReader"""'], {}), "('PDFReader')\n", (812, 825), False, 'from llama_index import download_loader\n'), ((1694, 1764), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'llm_predictor': 'llm', 'chunk_size_limit': '(1024)'}), '(llm_predictor=llm, chunk_size_limit=1024)\n', (1722, 1764), False, 'from llama_index import ServiceContext\n'), ((1777, 1839), 'llama_index.GPTSimpleVectorIndex.from_documents', 'GPTSimpleVectorIndex.from_documents', (['docs'], {'service_context': 'ctx'}), '(docs, service_context=ctx)\n', (1812, 1839), False, 'from llama_index import GPTSimpleVectorIndex\n'), ((2099, 2110), 'gradio.Blocks', 'gr.Blocks', ([], {}), '()\n', (2108, 2110), True, 'import gradio as gr\n'), ((2125, 2174), 'gradio.Markdown', 'gr.Markdown', (['f"""<center><h1>{title}</h1></center>"""'], {}), "(f'<center><h1>{title}</h1></center>')\n", (2136, 2174), True, 'import gradio as gr\n'), ((2179, 2203), 'gradio.Markdown', 'gr.Markdown', (['description'], {}), '(description)\n', (2190, 2203), True, 'import gradio as gr\n'), ((2214, 2222), 'gradio.Row', 'gr.Row', ([], {}), '()\n', (2220, 2222), True, 'import gradio as gr\n'), ((884, 895), 'pathlib.Path', 'Path', (['fpath'], {}), '(fpath)\n', (888, 895), False, 'from pathlib import Path\n'), ((1515, 1545), 'os.rename', 'os.rename', (['fname', '"""corpus.pdf"""'], {}), "(fname, 'corpus.pdf')\n", (1524, 1545), False, 'import os\n'), ((1614, 1682), 'langchain.OpenAI', 'OpenAI', ([], {'openai_api_key': 'api_key', 'temperature': '(0)', 'model_name': 'MODEL_NAME'}), '(openai_api_key=api_key, temperature=0, model_name=MODEL_NAME)\n', (1620, 1682), False, 'from langchain import OpenAI\n'), ((2238, 2248), 'gradio.Group', 'gr.Group', ([], {}), '()\n', (2246, 2248), True, 'import gradio as gr\n'), ((2279, 2313), 'gradio.Textbox', 'gr.Textbox', ([], {'label': '"""OpenAI API key"""'}), "(label='OpenAI API key')\n", (2289, 2313), True, 'import gradio as gr\n'), ((2333, 2372), 'gradio.Textbox', 'gr.Textbox', ([], {'label': '"""PDF URL to download"""'}), "(label='PDF URL to download')\n", (2343, 2372), True, 'import gradio as gr\n'), ((2385, 2427), 'gradio.Markdown', 'gr.Markdown', (['"""<center><h4>OR<h4></center>"""'], {}), "('<center><h4>OR<h4></center>')\n", (2396, 2427), True, 'import gradio as gr\n'), ((2448, 2496), 'gradio.File', 'gr.File', ([], {'label': '"""Upload PDF"""', 'file_types': "['.pdf']"}), "(label='Upload PDF', file_types=['.pdf'])\n", (2455, 2496), True, 'import gradio as gr\n'), ((2521, 2552), 'gradio.Textbox', 'gr.Textbox', ([], {'label': '"""User prompt"""'}), "(label='User prompt')\n", (2531, 2552), True, 'import gradio as gr\n'), ((2571, 2596), 'gradio.Button', 'gr.Button', ([], {'value': '"""Submit"""'}), "(value='Submit')\n", (2580, 2596), True, 'import gradio as gr\n'), ((2650, 2660), 'gradio.Group', 'gr.Group', ([], {}), '()\n', (2658, 2660), True, 'import gradio as gr\n'), ((2683, 2744), 'gradio.Textbox', 'gr.Textbox', ([], {'label': '"""Response:"""', 'lines': '(15)', 'placeholder': '"""Output"""'}), "(label='Response:', lines=15, placeholder='Output')\n", (2693, 2744), True, 'import gradio as gr\n')] |
# Import standard libraries
import os
import re
import json
import getpass
import logging
# Import third-party libraries for web scraping, API interactions, and data processing
import requests
import pandas as pd
from bs4 import BeautifulSoup
# Import libraries for interacting with OpenAI and other language models
import openai
import llama_index
from llama_index.llms import OpenAI
from llama_index.embeddings import OpenAIEmbedding
from llama_index.llms import (
CustomLLM,
CompletionResponse,
CompletionResponseGen,
LLMMetadata,
)
# Import for creating web interfaces
import gradio as gr
# Import specific utilities for news feed parsing and query processing
from RAG_utils import NewsFeedParser, HybridRetriever, NewsQueryEngine
with open('config.json') as config_file:
config = json.load(config_file)
# Setup logging
logging.basicConfig(level=logging.INFO)
openai.api_key = config['OPENAI_API_KEY']
os.environ['OPENAI_API_KEY'] = openai.api_key
llm = OpenAI(model="gpt-4", temperature=0.1, max_tokens=512)
embed_model = OpenAIEmbedding()
def chatbot(input_text):
# Create an instance of NewsFeedParser and process query
news_parser = NewsFeedParser()
documents = news_parser.process_and_chunk_articles(input_text)
# Initialize the query engine with the processed documents
pdf_query_engine = NewsQueryEngine(documents, llm, embed_model)
query_engine = pdf_query_engine.setup_query_engine()
# Process the query using the query engine
response = query_engine.query(input_text)
return response
# Gradio interface setup
iface = gr.Interface(
fn=chatbot,
inputs=gr.components.Textbox(lines=3, label="Enter your text:"),
outputs=gr.components.Textbox(lines=20, label="Answer:"),
title="FinWise Explorer"
)
# Launch the Gradio interface
iface.launch(share=True)
| [
"llama_index.llms.OpenAI",
"llama_index.embeddings.OpenAIEmbedding"
] | [((854, 893), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO'}), '(level=logging.INFO)\n', (873, 893), False, 'import logging\n'), ((990, 1044), 'llama_index.llms.OpenAI', 'OpenAI', ([], {'model': '"""gpt-4"""', 'temperature': '(0.1)', 'max_tokens': '(512)'}), "(model='gpt-4', temperature=0.1, max_tokens=512)\n", (996, 1044), False, 'from llama_index.llms import OpenAI\n'), ((1059, 1076), 'llama_index.embeddings.OpenAIEmbedding', 'OpenAIEmbedding', ([], {}), '()\n', (1074, 1076), False, 'from llama_index.embeddings import OpenAIEmbedding\n'), ((810, 832), 'json.load', 'json.load', (['config_file'], {}), '(config_file)\n', (819, 832), False, 'import json\n'), ((1183, 1199), 'RAG_utils.NewsFeedParser', 'NewsFeedParser', ([], {}), '()\n', (1197, 1199), False, 'from RAG_utils import NewsFeedParser, HybridRetriever, NewsQueryEngine\n'), ((1354, 1398), 'RAG_utils.NewsQueryEngine', 'NewsQueryEngine', (['documents', 'llm', 'embed_model'], {}), '(documents, llm, embed_model)\n', (1369, 1398), False, 'from RAG_utils import NewsFeedParser, HybridRetriever, NewsQueryEngine\n'), ((1645, 1701), 'gradio.components.Textbox', 'gr.components.Textbox', ([], {'lines': '(3)', 'label': '"""Enter your text:"""'}), "(lines=3, label='Enter your text:')\n", (1666, 1701), True, 'import gradio as gr\n'), ((1715, 1763), 'gradio.components.Textbox', 'gr.components.Textbox', ([], {'lines': '(20)', 'label': '"""Answer:"""'}), "(lines=20, label='Answer:')\n", (1736, 1763), True, 'import gradio as gr\n')] |
import torch
from langchain.llms.base import LLM
from langchain.embeddings.huggingface import HuggingFaceEmbeddings
from llama_index import LangchainEmbedding
from llama_index import SimpleDirectoryReader, LLMPredictor, PromptHelper, GPTSimpleVectorIndex
from peft import PeftModel
from transformers import LlamaTokenizer, LlamaForCausalLM, GenerationConfig
hf_model_path = "models/llama-7b"
alpaca_model_path = "models/lora-alpaca"
tokenizer = LlamaTokenizer.from_pretrained(hf_model_path)
model = LlamaForCausalLM.from_pretrained(
hf_model_path,
load_in_8bit=True, #Dissabling could solve some errors
device_map="auto",
)
model = PeftModel.from_pretrained(model, alpaca_model_path)
device = torch.device("cuda") if torch.cuda.is_available() else "cpu"
max_length = 1500 #2048
max_new_tokens = 48
class LLaMALLM(LLM):
def _call(self, prompt, stop=None):
prompt += "### Response:"
inputs = tokenizer(prompt, return_tensors="pt")
input_ids = inputs["input_ids"].cuda()
generation_config = GenerationConfig(
temperature=0.6,
top_p=0.95,
repetition_penalty=1.15,
)
with torch.no_grad():
generation_output = model.generate(
input_ids=input_ids,
generation_config=generation_config,
return_dict_in_generate=True,
output_scores=True,
max_new_tokens=128,
)
response = ""
for s in generation_output.sequences:
response += tokenizer.decode(s)
response = response[len(prompt):]
print("Model Response:", response)
return response
def _identifying_params(self):
return {"name_of_model": "alpaca"}
def _llm_type(self):
return "custom"
max_input_size = max_length
num_output = max_new_tokens
max_chunk_overlap = 20
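# PromptHelper keeps prompts within the model's context window using the
# maximum input size, number of output tokens, and chunk overlap above.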
prompt_helper = PromptHelper(max_input_size, num_output, max_chunk_overlap)
embed_model = LangchainEmbedding(HuggingFaceEmbeddings())
documents = SimpleDirectoryReader('data').load_data()
llm_predictor = LLMPredictor(llm=LLaMALLM())
index = GPTSimpleVectorIndex(documents, llm_predictor=llm_predictor, embed_model=embed_model, prompt_helper=prompt_helper)
index.save_to_disk('index.json')
new_index = GPTSimpleVectorIndex.load_from_disk('index.json', embed_model=embed_model, llm_predictor=llm_predictor, prompt_helper=prompt_helper)
response = new_index.query("What did Gatsby do before he met Daisy?")
print(response.response)
response = new_index.query("What did the narrator do after getting back to Chicago?")
print(response.response)
| [
"llama_index.GPTSimpleVectorIndex.load_from_disk",
"llama_index.GPTSimpleVectorIndex",
"llama_index.SimpleDirectoryReader",
"llama_index.PromptHelper"
] | [((460, 505), 'transformers.LlamaTokenizer.from_pretrained', 'LlamaTokenizer.from_pretrained', (['hf_model_path'], {}), '(hf_model_path)\n', (490, 505), False, 'from transformers import LlamaTokenizer, LlamaForCausalLM, GenerationConfig\n'), ((517, 606), 'transformers.LlamaForCausalLM.from_pretrained', 'LlamaForCausalLM.from_pretrained', (['hf_model_path'], {'load_in_8bit': '(True)', 'device_map': '"""auto"""'}), "(hf_model_path, load_in_8bit=True,\n device_map='auto')\n", (549, 606), False, 'from transformers import LlamaTokenizer, LlamaForCausalLM, GenerationConfig\n'), ((667, 718), 'peft.PeftModel.from_pretrained', 'PeftModel.from_pretrained', (['model', 'alpaca_model_path'], {}), '(model, alpaca_model_path)\n', (692, 718), False, 'from peft import PeftModel\n'), ((1997, 2056), 'llama_index.PromptHelper', 'PromptHelper', (['max_input_size', 'num_output', 'max_chunk_overlap'], {}), '(max_input_size, num_output, max_chunk_overlap)\n', (2009, 2056), False, 'from llama_index import SimpleDirectoryReader, LLMPredictor, PromptHelper, GPTSimpleVectorIndex\n'), ((2226, 2345), 'llama_index.GPTSimpleVectorIndex', 'GPTSimpleVectorIndex', (['documents'], {'llm_predictor': 'llm_predictor', 'embed_model': 'embed_model', 'prompt_helper': 'prompt_helper'}), '(documents, llm_predictor=llm_predictor, embed_model=\n embed_model, prompt_helper=prompt_helper)\n', (2246, 2345), False, 'from llama_index import SimpleDirectoryReader, LLMPredictor, PromptHelper, GPTSimpleVectorIndex\n'), ((2390, 2526), 'llama_index.GPTSimpleVectorIndex.load_from_disk', 'GPTSimpleVectorIndex.load_from_disk', (['"""index.json"""'], {'embed_model': 'embed_model', 'llm_predictor': 'llm_predictor', 'prompt_helper': 'prompt_helper'}), "('index.json', embed_model=embed_model,\n llm_predictor=llm_predictor, prompt_helper=prompt_helper)\n", (2425, 2526), False, 'from llama_index import SimpleDirectoryReader, LLMPredictor, PromptHelper, GPTSimpleVectorIndex\n'), ((757, 782), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (780, 782), False, 'import torch\n'), ((733, 753), 'torch.device', 'torch.device', (['"""cuda"""'], {}), "('cuda')\n", (745, 753), False, 'import torch\n'), ((2091, 2114), 'langchain.embeddings.huggingface.HuggingFaceEmbeddings', 'HuggingFaceEmbeddings', ([], {}), '()\n', (2112, 2114), False, 'from langchain.embeddings.huggingface import HuggingFaceEmbeddings\n'), ((1088, 1158), 'transformers.GenerationConfig', 'GenerationConfig', ([], {'temperature': '(0.6)', 'top_p': '(0.95)', 'repetition_penalty': '(1.15)'}), '(temperature=0.6, top_p=0.95, repetition_penalty=1.15)\n', (1104, 1158), False, 'from transformers import LlamaTokenizer, LlamaForCausalLM, GenerationConfig\n'), ((2129, 2158), 'llama_index.SimpleDirectoryReader', 'SimpleDirectoryReader', (['"""data"""'], {}), "('data')\n", (2150, 2158), False, 'from llama_index import SimpleDirectoryReader, LLMPredictor, PromptHelper, GPTSimpleVectorIndex\n'), ((1224, 1239), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (1237, 1239), False, 'import torch\n')] |
import os
import sys
from random import randrange
from typing import List, Union
from datetime import date
from dotenv import load_dotenv
from fastapi import FastAPI, File, Request, UploadFile, Path
from fastapi.logger import logger
from fastapi.responses import JSONResponse
from pyngrok import ngrok
import os
import weaviate
from slack_sdk import WebClient
from slack_sdk.errors import SlackApiError
from supabase import Client, create_client
import shutil
import nest_asyncio
import json
import logging
from llama_index.core import StorageContext
from llama_index.core import VectorStoreIndex,SimpleDirectoryReader,ServiceContext,PromptTemplate
from llama_index.vector_stores.weaviate import WeaviateVectorStore
from llama_index.core.response.notebook_utils import display_response
from llama_index.llms.clarifai import Clarifai
from datetime import datetime
from datetime import datetime, timedelta, timezone
from llama_index.core import Document, VectorStoreIndex
from llama_index.core import StorageContext
import weaviate
nest_asyncio.apply()
# Get the current date and time
current_date_time = datetime.now()
current_date = current_date_time.date()
# Load environment variables from the .env file
load_dotenv()
""" We need to pass the 'Bot User OAuth Token' """
# slack_token = os.environ.get('SLACK_BOT_TOKEN')
os.environ["CLARIFAI_PAT"] = os.getenv("CLARIFAI_PAT") #you can replace with your PAT or use mine
slack_token = os.getenv("SLACK_TOKEN")
OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
url = os.getenv("SUPABASE_URL")
key = os.getenv("SUPABASE_ANON_KEY")
WEAVIATE_API_KEY = os.getenv("WEAVIATE_API_KEY")
WEAVIATE_URL = os.getenv("WEAVIATE_URL")
OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
os.environ['OPENAI_API_KEY'] = OPENAI_API_KEY
# Use distinct names so the Slack client is not shadowed by the Weaviate client.
slack_client = WebClient(token=slack_token)
supabase: Client = create_client(url, key)
auth_config = weaviate.AuthApiKey(api_key=WEAVIATE_API_KEY)
weaviate_client = weaviate.Client(
    url=WEAVIATE_URL,
    auth_client_secret=auth_config
)
# try:
# Posting a message in #random channel
# response = client.chat_postMessage(channel="meetings", text="Testingaaa \n line 2")
# print("Done 1")
# Sending a message to a particular user
# response = client.chat_postEphemeral(
# channel="random", text="Hello U06ETJNQX6E", user="U06ETJNQX6E"
# )
# print("Done 2")
# Get basic information of the channel where our Bot has access
# response = client.conversations_info(channel="random")
# print("Done 3")
# Get a list of conversations
# response = client.conversations_list()
# print(response["channels"])
# except SlackApiError as e:
# assert e.response["error"]
# print("slack bot error")
app = FastAPI()
import os
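# Index the transcript text into Weaviate and answer the question from the
# two most similar chunks.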
def search_and_query(text, ask):
text_list = [text]
documents = [Document(text=t) for t in text_list]
    vector_store = WeaviateVectorStore(weaviate_client=weaviate_client)
storage_context = StorageContext.from_defaults(vector_store=vector_store)
index = VectorStoreIndex.from_documents(documents, storage_context=storage_context)
query_engine = index.as_query_engine(similarity_top_k=2)
response = query_engine.query(ask)
return response
@app.get("/")
def read_root():
return {"Hello": "World"}
@app.get("/items/{item_id}")
def read_item(item_id: int, q: Union[str, None] = None):
return {"item_id": item_id, "q": q}
@app.post("/transcription")
async def upload_file(file: UploadFile = File(...)):
contents = await file.read()
data = contents.decode("utf-8")
print(type(data))
try:
# send to clarifai with prompt to summarize it the transcript
llm_model = Clarifai(model_url="https://clarifai.com/openai/chat-completion/models/gpt-4-turbo")
summary = llm_model.complete(prompt=f'''
Please generate a concise summary of the following Zoom meeting transcription, in this format. Instead of new lines, put the literal
characters '\n' without the quotes for formatting:
Highlighting the key takeaways, major discussion points, and relevant speakers. The summary should follow the format below:
Topic: [Main topic of the meeting]
Speakers:
- [List the speakers' names along with their notable contributions or comments]
Summary:
- [Provide a brief summary of the meeting's main topics and discussions, capturing the essence of the conversation]
Transcription: {data}
''')
summary = (str(summary))
supabase.table("transcripts").insert({"transcript": data, "summary": summary}).execute()
client.chat_postMessage(channel="meetings", text=summary)
return JSONResponse(
content={"message": "Transcript summarized with gpt-4-turbo and saved to supabase"},
status_code=200,
)
except Exception as e:
return JSONResponse(
content={"message": "Error printing file contents"}, status_code=500
)
@app.post("/dailysummary")
def daily_summary():
today = date.today()
today_start = today.isoformat() + "T00:00:00Z"
today_end = today.isoformat() + "T23:59:59Z"
#response = supabase.table("transcripts").select('*').execute()
print("HAHAHAHAHA")
response = supabase.table("transcripts").select().filter("created_at", "gte", today_start).filter("created_at", "lte", today_end).execute()
summaries = response.data
print(summaries)
summaries_string = "\n\n".join([summary['summary'] for summary in summaries])
full_message = "*Daily meeting summaries*\n\n" + summaries_string
print(full_message)
client.chat_postMessage(channel="meetings", text=full_message)
return JSONResponse(content="Summaries posted to Slack", status_code=200)
# response = client.conversations_info(channel="random")
# response = client.conversations_list()
# print(response["channels"])
@app.post("/rag")
def rag_query(data: dict):
messages = data.get("messages", [])
user_message = next((msg["content"] for msg in messages if msg["role"] == "user"), None)
response = supabase.table("transcripts").select('transcript').execute()
summaries_dated = response.data
text_string = str(summaries_dated)
out = search_and_query(text_string, user_message)
return JSONResponse(content={"title": f"{out}"}, status_code=200) | [
"llama_index.vector_stores.weaviate.WeaviateVectorStore",
"llama_index.core.VectorStoreIndex.from_documents",
"llama_index.core.StorageContext.from_defaults",
"llama_index.core.Document",
"llama_index.llms.clarifai.Clarifai"
] | [((1032, 1052), 'nest_asyncio.apply', 'nest_asyncio.apply', ([], {}), '()\n', (1050, 1052), False, 'import nest_asyncio\n'), ((1109, 1123), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (1121, 1123), False, 'from datetime import datetime, timedelta, timezone\n'), ((1214, 1227), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (1225, 1227), False, 'from dotenv import load_dotenv\n'), ((1360, 1385), 'os.getenv', 'os.getenv', (['"""CLARIFAI_PAT"""'], {}), "('CLARIFAI_PAT')\n", (1369, 1385), False, 'import os\n'), ((1443, 1467), 'os.getenv', 'os.getenv', (['"""SLACK_TOKEN"""'], {}), "('SLACK_TOKEN')\n", (1452, 1467), False, 'import os\n'), ((1485, 1512), 'os.getenv', 'os.getenv', (['"""OPENAI_API_KEY"""'], {}), "('OPENAI_API_KEY')\n", (1494, 1512), False, 'import os\n'), ((1519, 1544), 'os.getenv', 'os.getenv', (['"""SUPABASE_URL"""'], {}), "('SUPABASE_URL')\n", (1528, 1544), False, 'import os\n'), ((1551, 1581), 'os.getenv', 'os.getenv', (['"""SUPABASE_ANON_KEY"""'], {}), "('SUPABASE_ANON_KEY')\n", (1560, 1581), False, 'import os\n'), ((1601, 1630), 'os.getenv', 'os.getenv', (['"""WEAVIATE_API_KEY"""'], {}), "('WEAVIATE_API_KEY')\n", (1610, 1630), False, 'import os\n'), ((1646, 1671), 'os.getenv', 'os.getenv', (['"""WEAVIATE_URL"""'], {}), "('WEAVIATE_URL')\n", (1655, 1671), False, 'import os\n'), ((1689, 1716), 'os.getenv', 'os.getenv', (['"""OPENAI_API_KEY"""'], {}), "('OPENAI_API_KEY')\n", (1698, 1716), False, 'import os\n'), ((1772, 1800), 'slack_sdk.WebClient', 'WebClient', ([], {'token': 'slack_token'}), '(token=slack_token)\n', (1781, 1800), False, 'from slack_sdk import WebClient\n'), ((1820, 1843), 'supabase.create_client', 'create_client', (['url', 'key'], {}), '(url, key)\n', (1833, 1843), False, 'from supabase import Client, create_client\n'), ((1859, 1904), 'weaviate.AuthApiKey', 'weaviate.AuthApiKey', ([], {'api_key': 'WEAVIATE_API_KEY'}), '(api_key=WEAVIATE_API_KEY)\n', (1878, 1904), False, 'import weaviate\n'), ((1915, 1980), 'weaviate.Client', 'weaviate.Client', ([], {'url': 'WEAVIATE_URL', 'auth_client_secret': 'auth_config'}), '(url=WEAVIATE_URL, auth_client_secret=auth_config)\n', (1930, 1980), False, 'import weaviate\n'), ((2655, 2664), 'fastapi.FastAPI', 'FastAPI', ([], {}), '()\n', (2662, 2664), False, 'from fastapi import FastAPI, File, Request, UploadFile, Path\n'), ((2805, 2848), 'llama_index.vector_stores.weaviate.WeaviateVectorStore', 'WeaviateVectorStore', ([], {'weaviate_client': 'client'}), '(weaviate_client=client)\n', (2824, 2848), False, 'from llama_index.vector_stores.weaviate import WeaviateVectorStore\n'), ((2871, 2926), 'llama_index.core.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {'vector_store': 'vector_store'}), '(vector_store=vector_store)\n', (2899, 2926), False, 'from llama_index.core import StorageContext\n'), ((2939, 3014), 'llama_index.core.VectorStoreIndex.from_documents', 'VectorStoreIndex.from_documents', (['documents'], {'storage_context': 'storage_context'}), '(documents, storage_context=storage_context)\n', (2970, 3014), False, 'from llama_index.core import Document, VectorStoreIndex\n'), ((3395, 3404), 'fastapi.File', 'File', (['...'], {}), '(...)\n', (3399, 3404), False, 'from fastapi import FastAPI, File, Request, UploadFile, Path\n'), ((5012, 5024), 'datetime.date.today', 'date.today', ([], {}), '()\n', (5022, 5024), False, 'from datetime import date\n'), ((5666, 5732), 'fastapi.responses.JSONResponse', 'JSONResponse', ([], {'content': '"""Summaries posted to Slack"""', 'status_code': 
'(200)'}), "(content='Summaries posted to Slack', status_code=200)\n", (5678, 5732), False, 'from fastapi.responses import JSONResponse\n'), ((6275, 6333), 'fastapi.responses.JSONResponse', 'JSONResponse', ([], {'content': "{'title': f'{out}'}", 'status_code': '(200)'}), "(content={'title': f'{out}'}, status_code=200)\n", (6287, 6333), False, 'from fastapi.responses import JSONResponse\n'), ((2749, 2765), 'llama_index.core.Document', 'Document', ([], {'text': 't'}), '(text=t)\n', (2757, 2765), False, 'from llama_index.core import Document, VectorStoreIndex\n'), ((3597, 3686), 'llama_index.llms.clarifai.Clarifai', 'Clarifai', ([], {'model_url': '"""https://clarifai.com/openai/chat-completion/models/gpt-4-turbo"""'}), "(model_url=\n 'https://clarifai.com/openai/chat-completion/models/gpt-4-turbo')\n", (3605, 3686), False, 'from llama_index.llms.clarifai import Clarifai\n'), ((4654, 4776), 'fastapi.responses.JSONResponse', 'JSONResponse', ([], {'content': "{'message': 'Transcript summarized with gpt-4-turbo and saved to supabase'}", 'status_code': '(200)'}), "(content={'message':\n 'Transcript summarized with gpt-4-turbo and saved to supabase'},\n status_code=200)\n", (4666, 4776), False, 'from fastapi.responses import JSONResponse\n'), ((4846, 4932), 'fastapi.responses.JSONResponse', 'JSONResponse', ([], {'content': "{'message': 'Error printing file contents'}", 'status_code': '(500)'}), "(content={'message': 'Error printing file contents'},\n status_code=500)\n", (4858, 4932), False, 'from fastapi.responses import JSONResponse\n')] |
from ..launcher.processor_event import ProcessorEvent
from ..launcher.event_type import EventType
from ...llms.utils.max_token_for_model import max_token_for_model, nb_token_for_input
from ...llms.prompt_engine.simple_prompt_engine import SimplePromptEngine
from ...llms.prompt_engine.vector_index_prompt_engine import VectorIndexPromptEngine
from ..context.processor_context import ProcessorContext
from .processor import APIContextProcessor
from .processor_type_name_utils import ProcessorType
from llama_index.llms.base import ChatMessage
class LLMPromptProcessor(APIContextProcessor):
processor_type = ProcessorType.LLM_PROMPT
DEFAULT_MODEL = "gpt-4"
def __init__(self, config, api_context_data: ProcessorContext):
super().__init__(config, api_context_data)
self.model = config.get("model", LLMPromptProcessor.DEFAULT_MODEL)
self.prompt = config["prompt"]
self.api_key = api_context_data.get_api_key_for_model(self.model)
def process(self):
input_data = None
if self.get_input_processor() is not None:
input_data = self.get_input_processor().get_output(
self.get_input_node_output_key()
)
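        # Fall back to a vector-index prompt engine when the input would overflow the model's context window.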
if input_data is not None and nb_token_for_input(
input_data, self.model
) > max_token_for_model(self.model):
prompt_engine = VectorIndexPromptEngine(
model=self.model, api_key=self.api_key, init_data=input_data
)
            answer = prompt_engine.prompt(self.prompt)
else:
self.init_context(input_data)
prompt_engine = SimplePromptEngine(model=self.model, api_key=self.api_key)
stream_chat_response = prompt_engine.prompt_stream(self.messages)
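            # Stream the completion, notifying observers with the partial answer as tokens arrive.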
            answer = ""
            for r in stream_chat_response:
                answer += r.delta
                event = ProcessorEvent(self, answer)
                self.notify(EventType.PROGRESS, event)
        self.set_output(answer)
        return answer
def init_context(self, input_data: str) -> None:
"""
Initialise the context for the LLM model with a standard set of messages.
Additional user input data can be provided, which will be added to the messages.
:param input_data: additional information to be used by the assistant.
"""
if input_data is None:
system_msg = "You are a helpful assistant. "
user_msg_content = self.prompt
else:
system_msg = (
"You are a helpful assistant. "
"You will respond to requests indicated by the '#Request' tag, "
"using the context provided under the '#Context' tag."
"Your response should feel natural and seamless, as if you've internalized the context "
"and are answering the request without needing to directly point back to the information provided"
)
user_msg_content = f"#Context: {input_data} \n\n#Request: {self.prompt}"
self.messages = [
ChatMessage(role="system", content=system_msg),
ChatMessage(role="user", content=user_msg_content),
]
def cancel(self):
pass
def update_context(self, data):
pass
| [
"llama_index.llms.base.ChatMessage"
] | [((3084, 3130), 'llama_index.llms.base.ChatMessage', 'ChatMessage', ([], {'role': '"""system"""', 'content': 'system_msg'}), "(role='system', content=system_msg)\n", (3095, 3130), False, 'from llama_index.llms.base import ChatMessage\n'), ((3144, 3194), 'llama_index.llms.base.ChatMessage', 'ChatMessage', ([], {'role': '"""user"""', 'content': 'user_msg_content'}), "(role='user', content=user_msg_content)\n", (3155, 3194), False, 'from llama_index.llms.base import ChatMessage\n')] |
import json
from llama_index.core.service_context_elements.llm_predictor import LLMPredictor
from llama_index.core.utilities.sql_wrapper import SQLDatabase
from llama_index.core.response_synthesizers import get_response_synthesizer
from llama_index.embeddings.langchain import LangchainEmbedding
from llama_index.core.retrievers import VectorIndexRetriever
from llama_index.core.query_engine import RetrieverQueryEngine
from llama_index.core.postprocessor import SimilarityPostprocessor
from llama_index.core.prompts import PromptTemplate
from llama_index.core.chat_engine import CondensePlusContextChatEngine, ContextChatEngine
from llama_index.core.indices.struct_store.sql_query import NLSQLTableQueryEngine
from llama_index.core.schema import ImageDocument
from llama_index.core.base.llms.types import ChatMessage
from llama_index.core.postprocessor.llm_rerank import LLMRerank
from llama_index.postprocessor.colbert_rerank import ColbertRerank
from llama_index.core.tools import ToolMetadata
from llama_index.core.selectors import LLMSingleSelector
from langchain.agents import initialize_agent
import ollama
from sqlalchemy import create_engine
from app.llms.tools.dalle import DalleImage
from app.llms.tools.describeimage import DescribeImage
from app.llms.tools.instantid import InstantID
from app.llms.tools.stablediffusion import StableDiffusionImage
from app.model import Model
from app.models import LLMModel, ProjectModel, QuestionModel, ChatModel
from app.project import Project
from app.tools import getLLMClass
from app.vectordb import vector_init
from modules.embeddings import EMBEDDINGS
from app.database import dbc
from sqlalchemy.orm import Session
from langchain_community.chat_models import ChatOpenAI
from transformers import pipeline
class Brain:
def __init__(self):
self.llmCache = {}
self.embeddingCache = {}
self.defaultCensorship = "This question is outside of my scope. Didn't find any related data."
self.defaultNegative = "I'm sorry, I don't know the answer to that."
self.defaultSystem = ""
self.loopFailsafe = 0
def memoryModelsInfo(self):
models = []
for llmr, mr in self.llmCache.items():
if mr.privacy == "private":
models.append(llmr)
return models
def getLLM(self, llmName, db: Session, **kwargs):
llm = None
if llmName in self.llmCache:
llm = self.llmCache[llmName]
else:
llm = self.loadLLM(llmName, db)
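        # Make sure the backing Ollama model is present locally, pulling it on demand if it is missing.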
if llm.props.class_name == "Ollama":
model_name = json.loads(llm.props.options).get("model")
try:
ollama.show(model_name)
except Exception as e:
if e.status_code == 404:
print("Model not found, pulling " + model_name + " from Ollama")
ollama.pull(model_name)
else:
raise e
return llm
def loadLLM(self, llmName, db: Session):
llm_db = dbc.get_llm_by_name(db, llmName)
if llm_db is not None:
llmm = LLMModel.model_validate(llm_db)
llm = getLLMClass(llmm.class_name)(**json.loads(llmm.options))
if llmName in self.llmCache:
del self.llmCache[llmName]
self.llmCache[llmName] = Model(llmName, llmm, llm)
return self.llmCache[llmName]
else:
return None
def getEmbedding(self, embeddingModel):
if embeddingModel in self.embeddingCache:
return self.embeddingCache[embeddingModel]
else:
if embeddingModel in EMBEDDINGS:
embedding_class, embedding_args, privacy, description = EMBEDDINGS[embeddingModel]
model = LangchainEmbedding(embedding_class(**embedding_args))
self.embeddingCache[embeddingModel] = model
return model
else:
raise Exception("Invalid Embedding type.")
def findProject(self, name, db):
p = dbc.get_project_by_name(db, name)
if p is None:
return None
proj = ProjectModel.model_validate(p)
if proj is not None:
project = Project()
project.model = proj
if project.model.type == "rag":
project.db = vector_init(self, project)
return project
def entryChat(self, projectName: str, chatModel: ChatModel, db: Session):
project = self.findProject(projectName, db)
model = self.getLLM(project.model.llm, db)
chat = project.loadChat(chatModel)
threshold = chatModel.score or project.model.score or 0.2
k = chatModel.k or project.model.k or 1
sysTemplate = project.model.system or self.defaultSystem
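        # Fetch extra candidates when a reranker will trim the result list back down to k.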
if project.model.colbert_rerank or project.model.llm_rerank:
final_k = k * 2
else:
final_k = k
retriever = VectorIndexRetriever(
index=project.db,
similarity_top_k=final_k,
)
postprocessors = []
if project.model.colbert_rerank:
postprocessors.append(ColbertRerank(
top_n=k,
model="colbert-ir/colbertv2.0",
tokenizer="colbert-ir/colbertv2.0",
keep_retrieval_score=True,
))
if project.model.llm_rerank:
postprocessors.append(LLMRerank(
choice_batch_size=k,
top_n=k,
llm=model.llm,
))
postprocessors.append(SimilarityPostprocessor(similarity_cutoff=threshold))
chat_engine = ContextChatEngine.from_defaults(
retriever=retriever,
system_prompt=sysTemplate,
memory=chat.history,
node_postprocessors=postprocessors,
)
chat_engine._llm = model.llm
try:
if chatModel.stream:
response = chat_engine.stream_chat(chatModel.question)
else:
response = chat_engine.chat(chatModel.question)
output_nodes = []
for node in response.source_nodes:
output_nodes.append(
{"source": node.metadata["source"], "keywords": node.metadata["keywords"], "score": node.score, "id": node.node_id, "text": node.text})
output = {
"id": chat.id,
"question": chatModel.question,
"sources": output_nodes,
"type": "chat"
}
if chatModel.stream:
if hasattr(response, "response_gen"):
for text in response.response_gen:
yield "data: " + text + "\n\n"
yield "data: " + json.dumps(output) + "\n"
yield "event: close\n\n"
else:
yield "data: " + self.defaultCensorship + "\n\n"
yield "data: " + json.dumps(output) + "\n"
yield "event: close\n\n"
else:
if len(response.source_nodes) == 0:
output["answer"] = project.model.censorship or self.defaultCensorship
else:
output["answer"] = response.response
yield output
except Exception as e:
if chatModel.stream:
yield "data: Inference failed\n"
yield "event: error\n\n"
raise e
def entryQuestion(self, projectName: str, questionModel: QuestionModel, db: Session):
project = self.findProject(projectName, db)
model = self.getLLM(project.model.llm, db)
sysTemplate = questionModel.system or project.model.system or self.defaultSystem
k = questionModel.k or project.model.k or 2
threshold = questionModel.score or project.model.score or 0.2
if questionModel.colbert_rerank or questionModel.llm_rerank or project.model.colbert_rerank or project.model.llm_rerank:
final_k = k * 2
else:
final_k = k
retriever = VectorIndexRetriever(
index=project.db,
similarity_top_k=final_k,
)
qa_prompt_tmpl = (
"Context information is below.\n"
"---------------------\n"
"{context_str}\n"
"---------------------\n"
"Given the context information and not prior knowledge, "
"answer the query.\n"
"Query: {query_str}\n"
"Answer: "
)
qa_prompt = PromptTemplate(qa_prompt_tmpl)
llm_predictor = LLMPredictor(llm=model.llm, system_prompt=sysTemplate)
response_synthesizer = get_response_synthesizer(llm=llm_predictor, text_qa_template=qa_prompt, streaming=questionModel.stream)
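        # The synthesizer answers from the retrieved context only and can stream tokens when the caller asks for it.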
postprocessors = []
if questionModel.colbert_rerank or project.model.colbert_rerank:
postprocessors.append(ColbertRerank(
top_n=k,
model="colbert-ir/colbertv2.0",
tokenizer="colbert-ir/colbertv2.0",
keep_retrieval_score=True,
))
if questionModel.llm_rerank or project.model.llm_rerank:
postprocessors.append(LLMRerank(
choice_batch_size=k,
top_n=k,
llm=model.llm,
))
postprocessors.append(SimilarityPostprocessor(similarity_cutoff=threshold))
query_engine = RetrieverQueryEngine(
retriever=retriever,
response_synthesizer=response_synthesizer,
node_postprocessors=postprocessors
)
try:
response = query_engine.query(questionModel.question)
output_nodes = []
if hasattr(response, "source_nodes"):
for node in response.source_nodes:
output_nodes.append(
{"source": node.metadata["source"], "keywords": node.metadata["keywords"], "score": node.score, "id": node.node_id, "text": node.text})
output = {
"question": questionModel.question,
"sources": output_nodes,
"type": "question"
}
if questionModel.stream:
if hasattr(response, "response_gen"):
for text in response.response_gen:
yield "data: " + text + "\n\n"
yield "data: " + json.dumps(output) + "\n"
yield "event: close\n\n"
                else:
yield "data: " + self.defaultCensorship + "\n\n"
yield "data: " + json.dumps(output) + "\n"
yield "event: close\n\n"
else:
if len(response.source_nodes) == 0:
output["answer"] = project.model.censorship or self.defaultCensorship
else:
output["answer"] = response.response
yield output
except Exception as e:
if questionModel.stream:
yield "data: Inference failed\n"
yield "event: error\n\n"
raise e
def entryVision(self, projectName, visionInput, isprivate, db: Session):
image = None
output = ""
project = self.findProject(projectName, db)
if project is None:
raise Exception("Project not found")
tools = [
DalleImage(),
StableDiffusionImage(),
DescribeImage(),
InstantID(),
]
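        # Private projects must not call the external DALL-E service, so drop that tool.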
if isprivate:
tools.pop(0)
llm = ChatOpenAI(model="gpt-3.5-turbo", temperature=0)
agent = initialize_agent(
tools, llm, agent="zero-shot-react-description", verbose=True)
outputAgent = agent.run(visionInput.question, tags=[visionInput])
if isinstance(outputAgent, str):
output = outputAgent
else:
if outputAgent["type"] == "describeimage":
model = self.getLLM(project.model.llm, db)
try:
response = model.llm.complete(prompt=visionInput.question, image_documents=[ImageDocument(image=visionInput.image)])
except Exception as e:
raise e
output = response.text
image = visionInput.image
else:
output = outputAgent["prompt"]
image = outputAgent["image"]
outputf = {
"question": visionInput.question,
"answer": output,
"image": image,
"sources": [],
"type": "vision"
}
return outputf
def inference(self, projectName, inferenceModel, db: Session):
project = self.findProject(projectName, db)
if project is None:
raise Exception("Project not found")
model = self.getLLM(project.model.llm, db)
sysTemplate = inferenceModel.system or project.model.system or self.defaultSystem
model.llm.system_prompt = sysTemplate
#model.llm.system = sysTemplate
#resp = model.llm.complete(inferenceModel.question)
messages = [
ChatMessage(
role="system", content=sysTemplate
),
ChatMessage(role="user", content=inferenceModel.question),
]
try:
if(inferenceModel.stream):
respgen = model.llm.stream_chat(messages)
for text in respgen:
yield "data: " + text.delta + "\n\n"
yield "event: close\n\n"
else:
resp = model.llm.chat(messages)
output = {
"question": inferenceModel.question,
"answer": resp.message.content.strip(),
"type": "inference"
}
yield output
except Exception as e:
if inferenceModel.stream:
yield "data: Inference failed\n"
yield "event: error\n\n"
raise e
def ragSQL(self, projectName, questionModel, db: Session):
project = self.findProject(projectName, db)
if project is None:
raise Exception("Project not found")
model = self.getLLM(project.model.llm, db)
engine = create_engine(project.model.connection)
sql_database = SQLDatabase(engine)
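        # Restrict text-to-SQL generation to an explicit table list when one is set on the request or the project.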
tables = None
if hasattr(questionModel, 'tables') and questionModel.tables is not None:
tables = questionModel.tables
elif project.model.tables:
tables = [table.strip() for table in project.model.tables.split(',')]
query_engine = NLSQLTableQueryEngine(
llm=model.llm,
sql_database=sql_database,
tables=tables,
)
question = (project.model.system or self.defaultSystem) + "\n Question: " + questionModel.question
try:
response = query_engine.query(question)
except Exception as e:
raise e
output = {
"question": questionModel.question,
"answer": response.response,
"sources": [response.metadata['sql_query']],
"type": "questionsql"
}
return output
def router(self, projectName, questionModel, db: Session):
choices = []
project = self.findProject(projectName, db)
if project is None:
raise Exception("Project not found")
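        # Offer one routing choice per configured entrance and let the LLM selector pick the destination project.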
for entrance in project.model.entrances:
choices.append(ToolMetadata(description=entrance.description, name=entrance.name))
selector = LLMSingleSelector.from_defaults()
selector_result = selector.select(
choices, query=questionModel.question
)
projectNameDest = project.model.entrances[selector_result.selections[0].index].destination
return projectNameDest
def classify(self, input):
classifier = pipeline("zero-shot-classification", model="facebook/bart-large-mnli")
sequence_to_classify = input.sequence
candidate_labels = input.labels
return classifier(sequence_to_classify, candidate_labels, multi_label=True) | [
"llama_index.core.prompts.PromptTemplate",
"llama_index.core.indices.struct_store.sql_query.NLSQLTableQueryEngine",
"llama_index.postprocessor.colbert_rerank.ColbertRerank",
"llama_index.core.tools.ToolMetadata",
"llama_index.core.query_engine.RetrieverQueryEngine",
"llama_index.core.schema.ImageDocument",
"llama_index.core.retrievers.VectorIndexRetriever",
"llama_index.core.postprocessor.llm_rerank.LLMRerank",
"llama_index.core.selectors.LLMSingleSelector.from_defaults",
"llama_index.core.service_context_elements.llm_predictor.LLMPredictor",
"llama_index.core.response_synthesizers.get_response_synthesizer",
"llama_index.core.utilities.sql_wrapper.SQLDatabase",
"llama_index.core.base.llms.types.ChatMessage",
"llama_index.core.chat_engine.ContextChatEngine.from_defaults",
"llama_index.core.postprocessor.SimilarityPostprocessor"
] | [((3043, 3075), 'app.database.dbc.get_llm_by_name', 'dbc.get_llm_by_name', (['db', 'llmName'], {}), '(db, llmName)\n', (3062, 3075), False, 'from app.database import dbc\n'), ((4079, 4112), 'app.database.dbc.get_project_by_name', 'dbc.get_project_by_name', (['db', 'name'], {}), '(db, name)\n', (4102, 4112), False, 'from app.database import dbc\n'), ((4174, 4204), 'app.models.ProjectModel.model_validate', 'ProjectModel.model_validate', (['p'], {}), '(p)\n', (4201, 4204), False, 'from app.models import LLMModel, ProjectModel, QuestionModel, ChatModel\n'), ((4998, 5062), 'llama_index.core.retrievers.VectorIndexRetriever', 'VectorIndexRetriever', ([], {'index': 'project.db', 'similarity_top_k': 'final_k'}), '(index=project.db, similarity_top_k=final_k)\n', (5018, 5062), False, 'from llama_index.core.retrievers import VectorIndexRetriever\n'), ((5712, 5853), 'llama_index.core.chat_engine.ContextChatEngine.from_defaults', 'ContextChatEngine.from_defaults', ([], {'retriever': 'retriever', 'system_prompt': 'sysTemplate', 'memory': 'chat.history', 'node_postprocessors': 'postprocessors'}), '(retriever=retriever, system_prompt=\n sysTemplate, memory=chat.history, node_postprocessors=postprocessors)\n', (5743, 5853), False, 'from llama_index.core.chat_engine import CondensePlusContextChatEngine, ContextChatEngine\n'), ((8196, 8260), 'llama_index.core.retrievers.VectorIndexRetriever', 'VectorIndexRetriever', ([], {'index': 'project.db', 'similarity_top_k': 'final_k'}), '(index=project.db, similarity_top_k=final_k)\n', (8216, 8260), False, 'from llama_index.core.retrievers import VectorIndexRetriever\n'), ((8669, 8699), 'llama_index.core.prompts.PromptTemplate', 'PromptTemplate', (['qa_prompt_tmpl'], {}), '(qa_prompt_tmpl)\n', (8683, 8699), False, 'from llama_index.core.prompts import PromptTemplate\n'), ((8725, 8779), 'llama_index.core.service_context_elements.llm_predictor.LLMPredictor', 'LLMPredictor', ([], {'llm': 'model.llm', 'system_prompt': 'sysTemplate'}), '(llm=model.llm, system_prompt=sysTemplate)\n', (8737, 8779), False, 'from llama_index.core.service_context_elements.llm_predictor import LLMPredictor\n'), ((8812, 8919), 'llama_index.core.response_synthesizers.get_response_synthesizer', 'get_response_synthesizer', ([], {'llm': 'llm_predictor', 'text_qa_template': 'qa_prompt', 'streaming': 'questionModel.stream'}), '(llm=llm_predictor, text_qa_template=qa_prompt,\n streaming=questionModel.stream)\n', (8836, 8919), False, 'from llama_index.core.response_synthesizers import get_response_synthesizer\n'), ((9591, 9716), 'llama_index.core.query_engine.RetrieverQueryEngine', 'RetrieverQueryEngine', ([], {'retriever': 'retriever', 'response_synthesizer': 'response_synthesizer', 'node_postprocessors': 'postprocessors'}), '(retriever=retriever, response_synthesizer=\n response_synthesizer, node_postprocessors=postprocessors)\n', (9611, 9716), False, 'from llama_index.core.query_engine import RetrieverQueryEngine\n'), ((11773, 11821), 'langchain_community.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'model': '"""gpt-3.5-turbo"""', 'temperature': '(0)'}), "(model='gpt-3.5-turbo', temperature=0)\n", (11783, 11821), False, 'from langchain_community.chat_models import ChatOpenAI\n'), ((11839, 11918), 'langchain.agents.initialize_agent', 'initialize_agent', (['tools', 'llm'], {'agent': '"""zero-shot-react-description"""', 'verbose': '(True)'}), "(tools, llm, agent='zero-shot-react-description', verbose=True)\n", (11855, 11918), False, 'from langchain.agents import initialize_agent\n'), ((14562, 14601), 
'sqlalchemy.create_engine', 'create_engine', (['project.model.connection'], {}), '(project.model.connection)\n', (14575, 14601), False, 'from sqlalchemy import create_engine\n'), ((14626, 14645), 'llama_index.core.utilities.sql_wrapper.SQLDatabase', 'SQLDatabase', (['engine'], {}), '(engine)\n', (14637, 14645), False, 'from llama_index.core.utilities.sql_wrapper import SQLDatabase\n'), ((14934, 15012), 'llama_index.core.indices.struct_store.sql_query.NLSQLTableQueryEngine', 'NLSQLTableQueryEngine', ([], {'llm': 'model.llm', 'sql_database': 'sql_database', 'tables': 'tables'}), '(llm=model.llm, sql_database=sql_database, tables=tables)\n', (14955, 15012), False, 'from llama_index.core.indices.struct_store.sql_query import NLSQLTableQueryEngine\n'), ((15938, 15971), 'llama_index.core.selectors.LLMSingleSelector.from_defaults', 'LLMSingleSelector.from_defaults', ([], {}), '()\n', (15969, 15971), False, 'from llama_index.core.selectors import LLMSingleSelector\n'), ((16273, 16343), 'transformers.pipeline', 'pipeline', (['"""zero-shot-classification"""'], {'model': '"""facebook/bart-large-mnli"""'}), "('zero-shot-classification', model='facebook/bart-large-mnli')\n", (16281, 16343), False, 'from transformers import pipeline\n'), ((3127, 3158), 'app.models.LLMModel.model_validate', 'LLMModel.model_validate', (['llm_db'], {}), '(llm_db)\n', (3150, 3158), False, 'from app.models import LLMModel, ProjectModel, QuestionModel, ChatModel\n'), ((3357, 3382), 'app.model.Model', 'Model', (['llmName', 'llmm', 'llm'], {}), '(llmName, llmm, llm)\n', (3362, 3382), False, 'from app.model import Model\n'), ((4256, 4265), 'app.project.Project', 'Project', ([], {}), '()\n', (4263, 4265), False, 'from app.project import Project\n'), ((5635, 5687), 'llama_index.core.postprocessor.SimilarityPostprocessor', 'SimilarityPostprocessor', ([], {'similarity_cutoff': 'threshold'}), '(similarity_cutoff=threshold)\n', (5658, 5687), False, 'from llama_index.core.postprocessor import SimilarityPostprocessor\n'), ((9513, 9565), 'llama_index.core.postprocessor.SimilarityPostprocessor', 'SimilarityPostprocessor', ([], {'similarity_cutoff': 'threshold'}), '(similarity_cutoff=threshold)\n', (9536, 9565), False, 'from llama_index.core.postprocessor import SimilarityPostprocessor\n'), ((11596, 11608), 'app.llms.tools.dalle.DalleImage', 'DalleImage', ([], {}), '()\n', (11606, 11608), False, 'from app.llms.tools.dalle import DalleImage\n'), ((11622, 11644), 'app.llms.tools.stablediffusion.StableDiffusionImage', 'StableDiffusionImage', ([], {}), '()\n', (11642, 11644), False, 'from app.llms.tools.stablediffusion import StableDiffusionImage\n'), ((11658, 11673), 'app.llms.tools.describeimage.DescribeImage', 'DescribeImage', ([], {}), '()\n', (11671, 11673), False, 'from app.llms.tools.describeimage import DescribeImage\n'), ((11687, 11698), 'app.llms.tools.instantid.InstantID', 'InstantID', ([], {}), '()\n', (11696, 11698), False, 'from app.llms.tools.instantid import InstantID\n'), ((13403, 13450), 'llama_index.core.base.llms.types.ChatMessage', 'ChatMessage', ([], {'role': '"""system"""', 'content': 'sysTemplate'}), "(role='system', content=sysTemplate)\n", (13414, 13450), False, 'from llama_index.core.base.llms.types import ChatMessage\n'), ((13494, 13551), 'llama_index.core.base.llms.types.ChatMessage', 'ChatMessage', ([], {'role': '"""user"""', 'content': 'inferenceModel.question'}), "(role='user', content=inferenceModel.question)\n", (13505, 13551), False, 'from llama_index.core.base.llms.types import ChatMessage\n'), ((2679, 
2702), 'ollama.show', 'ollama.show', (['model_name'], {}), '(model_name)\n', (2690, 2702), False, 'import ollama\n'), ((3178, 3206), 'app.tools.getLLMClass', 'getLLMClass', (['llmm.class_name'], {}), '(llmm.class_name)\n', (3189, 3206), False, 'from app.tools import getLLMClass\n'), ((4372, 4398), 'app.vectordb.vector_init', 'vector_init', (['self', 'project'], {}), '(self, project)\n', (4383, 4398), False, 'from app.vectordb import vector_init\n'), ((5203, 5325), 'llama_index.postprocessor.colbert_rerank.ColbertRerank', 'ColbertRerank', ([], {'top_n': 'k', 'model': '"""colbert-ir/colbertv2.0"""', 'tokenizer': '"""colbert-ir/colbertv2.0"""', 'keep_retrieval_score': '(True)'}), "(top_n=k, model='colbert-ir/colbertv2.0', tokenizer=\n 'colbert-ir/colbertv2.0', keep_retrieval_score=True)\n", (5216, 5325), False, 'from llama_index.postprocessor.colbert_rerank import ColbertRerank\n'), ((5473, 5527), 'llama_index.core.postprocessor.llm_rerank.LLMRerank', 'LLMRerank', ([], {'choice_batch_size': 'k', 'top_n': 'k', 'llm': 'model.llm'}), '(choice_batch_size=k, top_n=k, llm=model.llm)\n', (5482, 5527), False, 'from llama_index.core.postprocessor.llm_rerank import LLMRerank\n'), ((9053, 9175), 'llama_index.postprocessor.colbert_rerank.ColbertRerank', 'ColbertRerank', ([], {'top_n': 'k', 'model': '"""colbert-ir/colbertv2.0"""', 'tokenizer': '"""colbert-ir/colbertv2.0"""', 'keep_retrieval_score': '(True)'}), "(top_n=k, model='colbert-ir/colbertv2.0', tokenizer=\n 'colbert-ir/colbertv2.0', keep_retrieval_score=True)\n", (9066, 9175), False, 'from llama_index.postprocessor.colbert_rerank import ColbertRerank\n'), ((9351, 9405), 'llama_index.core.postprocessor.llm_rerank.LLMRerank', 'LLMRerank', ([], {'choice_batch_size': 'k', 'top_n': 'k', 'llm': 'model.llm'}), '(choice_batch_size=k, top_n=k, llm=model.llm)\n', (9360, 9405), False, 'from llama_index.core.postprocessor.llm_rerank import LLMRerank\n'), ((15841, 15907), 'llama_index.core.tools.ToolMetadata', 'ToolMetadata', ([], {'description': 'entrance.description', 'name': 'entrance.name'}), '(description=entrance.description, name=entrance.name)\n', (15853, 15907), False, 'from llama_index.core.tools import ToolMetadata\n'), ((2603, 2632), 'json.loads', 'json.loads', (['llm.props.options'], {}), '(llm.props.options)\n', (2613, 2632), False, 'import json\n'), ((3209, 3233), 'json.loads', 'json.loads', (['llmm.options'], {}), '(llmm.options)\n', (3219, 3233), False, 'import json\n'), ((2878, 2901), 'ollama.pull', 'ollama.pull', (['model_name'], {}), '(model_name)\n', (2889, 2901), False, 'import ollama\n'), ((6841, 6859), 'json.dumps', 'json.dumps', (['output'], {}), '(output)\n', (6851, 6859), False, 'import json\n'), ((7040, 7058), 'json.dumps', 'json.dumps', (['output'], {}), '(output)\n', (7050, 7058), False, 'import json\n'), ((10598, 10616), 'json.dumps', 'json.dumps', (['output'], {}), '(output)\n', (10608, 10616), False, 'import json\n'), ((10798, 10816), 'json.dumps', 'json.dumps', (['output'], {}), '(output)\n', (10808, 10816), False, 'import json\n'), ((12336, 12374), 'llama_index.core.schema.ImageDocument', 'ImageDocument', ([], {'image': 'visionInput.image'}), '(image=visionInput.image)\n', (12349, 12374), False, 'from llama_index.core.schema import ImageDocument\n')] |
from llama_index.embeddings import OpenAIEmbedding
from llama_index.llms import OpenAI
from llama_index.ingestion import IngestionPipeline
from llama_index.extractors import TitleExtractor, SummaryExtractor
from llama_index.text_splitter import SentenceSplitter
from llama_index.schema import MetadataMode
# from langchain.embeddings.huggingface import HuggingFaceBgeEmbeddings
# from llama_index.embeddings import *
from llama_index.embeddings import HuggingFaceEmbedding
from llm import LLMClient
from llama_index.llms import Ollama
from llama_index import ServiceContext
from llama_index.vector_stores import AstraDBVectorStore
from llama_index import Document
from llama_index.text_splitter import TokenTextSplitter
from llama_index import set_global_service_context
from llama_index.llms import LangChainLLM
from langchain_nvidia_ai_endpoints import ChatNVIDIA
import os
import asyncio
token = os.environ['token']
api_endpoint = os.environ['api_endpoint']
def create_pipeline_astra_db(llm_type='nvidia',embed_model='local',collection_name='video_transcript'):
print("Loading Pipeline")
if embed_model=='local':
print("embed_model local")
embed_model = "BAAI/bge-base-en"
embed_model_dim = 768
embed_model = HuggingFaceEmbedding(model_name=embed_model)
elif embed_model=='nvidia':
        print("embed_model nvidia")
embed_model_dim = 1024
embed_model = HuggingFaceEmbedding(model_name=embed_model)
else:
print("embed_model else")
embed_model = HuggingFaceEmbedding(model_name=embed_model)
if llm_type=='nvidia':
print('llm nvidia')
nvai_llm = ChatNVIDIA(model='llama2_70b')
llm = LangChainLLM(llm=nvai_llm)
elif llm_type=='ollama':
print('llm_ollama')
llm = Ollama(model='stablelm2', temperature=0.1)
else:
print('llm else')
llm = OpenAI(model="gpt-3.5-turbo-1106", temperature=0.1)
service_context = ServiceContext.from_defaults(embed_model=embed_model, llm=llm)
set_global_service_context(service_context)
astra_db_store = AstraDBVectorStore(
token=token,
api_endpoint=api_endpoint,
collection_name=collection_name,
embedding_dimension=embed_model_dim,
)
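    # The pipeline chunks the text, enriches nodes with title/summary metadata, embeds them, and writes to Astra DB.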
transformations = [
SentenceSplitter(chunk_size=1024, chunk_overlap=100),
TitleExtractor(llm=llm, metadata_mode=MetadataMode.EMBED, num_workers=8),
SummaryExtractor(llm=llm, metadata_mode=MetadataMode.EMBED, num_workers=8),
embed_model,
]
# text_splitter = TokenTextSplitter(chunk_size=512)
return IngestionPipeline(transformations=transformations,vector_store=astra_db_store)
# return IngestionPipeline(
# transformations=[text_splitter, embed_model],
# vector_store=astra_db_store)
def ingest_pipeline_astra_db(text,metadata=None,_async=False,collection_name=None,run_async=False):
if collection_name is not None:
pipeline = create_pipeline_astra_db(llm_type='nvidia',collection_name=collection_name)
else:
pipeline = create_pipeline_astra_db(llm_type='nvidia')
print('Pipeline_loaded')
# save
# pipeline.persist("./pipeline_storage")
# load and restore state
# will run instantly due to the cache
if metadata:
doc = [Document(text=text,metadata=metadata)]
else:
doc = [Document(text=text)]
    if run_async:
        # arun is a coroutine; run it on an event loop so the ingestion actually executes.
        asyncio.run(pipeline.arun(documents=doc))
    else:
        if _async:
            # loop = asyncio.get_event_loop()
            asyncio.run(pipeline.arun(documents=doc, num_workers=4))
        else:
            nodes = pipeline.run(documents=doc, num_workers=1)
# pipeline.load("./pipeline_storage")
if __name__=='__main__':
ingest_pipeline_astra_db("Can you load this in asstra db, astra db is vector store using llama index") | [
"llama_index.vector_stores.AstraDBVectorStore",
"llama_index.llms.Ollama",
"llama_index.ServiceContext.from_defaults",
"llama_index.llms.OpenAI",
"llama_index.llms.LangChainLLM",
"llama_index.ingestion.IngestionPipeline",
"llama_index.set_global_service_context",
"llama_index.extractors.TitleExtractor",
"llama_index.text_splitter.SentenceSplitter",
"llama_index.embeddings.HuggingFaceEmbedding",
"llama_index.extractors.SummaryExtractor",
"llama_index.Document"
] | [((1967, 2029), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'embed_model': 'embed_model', 'llm': 'llm'}), '(embed_model=embed_model, llm=llm)\n', (1995, 2029), False, 'from llama_index import ServiceContext\n'), ((2034, 2077), 'llama_index.set_global_service_context', 'set_global_service_context', (['service_context'], {}), '(service_context)\n', (2060, 2077), False, 'from llama_index import set_global_service_context\n'), ((2100, 2233), 'llama_index.vector_stores.AstraDBVectorStore', 'AstraDBVectorStore', ([], {'token': 'token', 'api_endpoint': 'api_endpoint', 'collection_name': 'collection_name', 'embedding_dimension': 'embed_model_dim'}), '(token=token, api_endpoint=api_endpoint, collection_name=\n collection_name, embedding_dimension=embed_model_dim)\n', (2118, 2233), False, 'from llama_index.vector_stores import AstraDBVectorStore\n'), ((2615, 2694), 'llama_index.ingestion.IngestionPipeline', 'IngestionPipeline', ([], {'transformations': 'transformations', 'vector_store': 'astra_db_store'}), '(transformations=transformations, vector_store=astra_db_store)\n', (2632, 2694), False, 'from llama_index.ingestion import IngestionPipeline\n'), ((1254, 1298), 'llama_index.embeddings.HuggingFaceEmbedding', 'HuggingFaceEmbedding', ([], {'model_name': 'embed_model'}), '(model_name=embed_model)\n', (1274, 1298), False, 'from llama_index.embeddings import HuggingFaceEmbedding\n'), ((1655, 1685), 'langchain_nvidia_ai_endpoints.ChatNVIDIA', 'ChatNVIDIA', ([], {'model': '"""llama2_70b"""'}), "(model='llama2_70b')\n", (1665, 1685), False, 'from langchain_nvidia_ai_endpoints import ChatNVIDIA\n'), ((1700, 1726), 'llama_index.llms.LangChainLLM', 'LangChainLLM', ([], {'llm': 'nvai_llm'}), '(llm=nvai_llm)\n', (1712, 1726), False, 'from llama_index.llms import LangChainLLM\n'), ((2301, 2353), 'llama_index.text_splitter.SentenceSplitter', 'SentenceSplitter', ([], {'chunk_size': '(1024)', 'chunk_overlap': '(100)'}), '(chunk_size=1024, chunk_overlap=100)\n', (2317, 2353), False, 'from llama_index.text_splitter import SentenceSplitter\n'), ((2363, 2435), 'llama_index.extractors.TitleExtractor', 'TitleExtractor', ([], {'llm': 'llm', 'metadata_mode': 'MetadataMode.EMBED', 'num_workers': '(8)'}), '(llm=llm, metadata_mode=MetadataMode.EMBED, num_workers=8)\n', (2377, 2435), False, 'from llama_index.extractors import TitleExtractor, SummaryExtractor\n'), ((2445, 2519), 'llama_index.extractors.SummaryExtractor', 'SummaryExtractor', ([], {'llm': 'llm', 'metadata_mode': 'MetadataMode.EMBED', 'num_workers': '(8)'}), '(llm=llm, metadata_mode=MetadataMode.EMBED, num_workers=8)\n', (2461, 2519), False, 'from llama_index.extractors import TitleExtractor, SummaryExtractor\n'), ((1420, 1464), 'llama_index.embeddings.HuggingFaceEmbedding', 'HuggingFaceEmbedding', ([], {'model_name': 'embed_model'}), '(model_name=embed_model)\n', (1440, 1464), False, 'from llama_index.embeddings import HuggingFaceEmbedding\n'), ((1531, 1575), 'llama_index.embeddings.HuggingFaceEmbedding', 'HuggingFaceEmbedding', ([], {'model_name': 'embed_model'}), '(model_name=embed_model)\n', (1551, 1575), False, 'from llama_index.embeddings import HuggingFaceEmbedding\n'), ((1798, 1840), 'llama_index.llms.Ollama', 'Ollama', ([], {'model': '"""stablelm2"""', 'temperature': '(0.1)'}), "(model='stablelm2', temperature=0.1)\n", (1804, 1840), False, 'from llama_index.llms import Ollama\n'), ((1891, 1942), 'llama_index.llms.OpenAI', 'OpenAI', ([], {'model': '"""gpt-3.5-turbo-1106"""', 'temperature': '(0.1)'}), 
"(model='gpt-3.5-turbo-1106', temperature=0.1)\n", (1897, 1942), False, 'from llama_index.llms import OpenAI\n'), ((3319, 3357), 'llama_index.Document', 'Document', ([], {'text': 'text', 'metadata': 'metadata'}), '(text=text, metadata=metadata)\n', (3327, 3357), False, 'from llama_index import Document\n'), ((3383, 3402), 'llama_index.Document', 'Document', ([], {'text': 'text'}), '(text=text)\n', (3391, 3402), False, 'from llama_index import Document\n')] |
#
# Graph Database Ingester For 5e SRD Content
#
import dotenv
dotenv.load_dotenv()
import json
from llama_index.core import Document, KnowledgeGraphIndex, StorageContext, VectorStoreIndex, load_index_from_storage, load_graph_from_storage
from llama_index.core.extractors import TitleExtractor, QuestionsAnsweredExtractor
from llama_index.core.query_engine.graph_query_engine import ComposableGraphQueryEngine
from llama_index.extractors.entity import EntityExtractor
from llama_index.core.ingestion import IngestionPipeline
from llama_index.core.node_parser import TokenTextSplitter, MarkdownNodeParser
from llama_index.core.storage.docstore import SimpleDocumentStore
from os import path
from src.documents import loadWebPages, parseWebPage, getLinks
from src.db import graphStore, vectorStore, documentStore
from src.file_utils import createOutputFile
from src.llm import documentTitle, answerFinder, get_service_context
from src.log_utils import debug
from src.rebel import extract_triplets
from typing import Sequence
from datetime import datetime
from markdownify import markdownify as md
# 1. Download Site
failedLinks = []
TARGET = "https://www.5esrd.com"
STORAGE = "./srd-store"
def loadIndex():
context = StorageContext.from_defaults(graph_store=graphStore, vector_store=vectorStore, docstore=documentStore)
graph = load_graph_from_storage(context, 'root')
return context,graph
# context = loadIndex()
# load(TARGET, context)
# exit()
title = " Web Site Document Loader "
print("=" * len(title))
print(title)
print("=" * len(title))
print("> Creating Output File")
file = createOutputFile('./kg-output', 'srd-graph-result')
def loadWebsitesIntoGraph(url: str, context: StorageContext, links: set = set()):
file.write(f'\n### URL: {url}\n')
debug("==> Loading Web URL [url: {}]".format(url))
document = loadWebPages([url]).pop()
documentDetails: dict[str, any] = {
"id": document.doc_id,
"text": document.text[:20]
}
file.write('LlamaIndex Document Details:\n')
file.write(f'```json\n')
file.write(f'{json.dumps(documentDetails, indent=2)}')
file.write(f'\n```\n\n')
debug("====> Parsing HTML [url: {}]".format(url))
    htmlDoc = parseWebPage(document.text)
file.write('HTML Document:\n')
file.write(f'```html\n')
file.write(f'{htmlDoc}')
file.write(f'\n```\n')
debug("====> Creating Document [url: {}]".format(url))
now = datetime.now()
createDate = f'{now:%Y%m%d%H%M%S}'
# markdown = md(str(htmlDoc), strip=['script']).replace('\n\n', '\n')
parsedDocument = Document(
text=htmlDoc.text,
id_=url,
metadata={ "source": url, "createdAt": createDate })
nodes = metadataExtractor([parsedDocument])
file.write('### Document Nodes w/ Metadata:\n')
file.write(f'{parsedDocument.text}\n')
context.docstore.add_documents(nodes, store_text=False)
debug("====> Loading Into Graph DB [url: {}]".format(url))
KnowledgeGraphIndex.build_index_from_nodes()
KnowledgeGraphIndex.from_documents(
documents=[parsedDocument],
service_context=get_service_context(),
kg_triplet_extract_fn=extract_triplets,
storage_context=context,
show_progress=True
)
debug("====> Getting Links [url: {}]".format(url))
webPageLinks = getLinks(htmlDoc.encode_contents(formatter="html"), url=url)
debug(f'====> Links found: {len(webPageLinks)}')
exit()
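    # Recursively ingest every link discovered on this page, skipping URLs that were already loaded.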
for link in webPageLinks:
try:
if (link not in links):
links.add(link)
loadWebsitesIntoGraph(link, context, links=links)
else:
                debug("===> Link Already Loaded [url: {}]...Skipping".format(link))
except Exception as error:
print(f'{error}')
failedLinks.append(link)
def getDocumentStore(documents: Sequence[Document]):
dirExists = path.isdir(STORAGE)
if (dirExists):
print("==> Existing Index Found, Updating...")
context = StorageContext.from_defaults(persist_dir=STORAGE)
index = load_index_from_storage(context, service_context=get_service_context())
index.refresh(documents=documents)
return context, index
else:
print("==> No Index Found, Creating...")
# TODO => Switch to external document storage solution, possibly graph
docStore = SimpleDocumentStore()
docStore.add_documents(documents)
storageContext = StorageContext.from_defaults(docstore=docStore)
storageContext.persist(STORAGE)
index = VectorStoreIndex.from_documents(
documents,
transformations=metadataExtractor(),
storage_context=storageContext,
service_context=get_service_context(),
show_progress=True
)
storageContext.persist(STORAGE)
return storageContext, index
def getWebDocumentAndLinks(url: str, documents: list[Document] = [], links: set = set()):
# This suggests a new url search, add the original link provided
if (len(links) == 0):
links.add(url)
try:
debug("==> Loading Web URL [url: {}]".format(url))
htmlDoc = loadWebPages([url])
debug("====> Parsing HTML [url: {}]".format(url))
webPage = parseWebPage(htmlDoc[0].text)
debug("====> Creating Document [url: {}]".format(url))
documents.append(Document(text=webPage, doc_id=url))
debug("====> Getting Links [url: {}]".format(url))
webPageLinks = getLinks(htmlDoc[0].text, url=url)
for link in webPageLinks:
if (link not in links):
links.add(link)
getWebDocumentAndLinks(link, documents=documents, links=links)
else:
                debug("===> Link Already Loaded [url: {}]...Skipping".format(link))
except:
failedLinks.append(url)
finally:
return documents, links
def metadataExtractor(documents: list[Document]):
# splitter = TokenTextSplitter(
# separator=" ", chunk_size=512, chunk_overlap=128
# )
splitter = MarkdownNodeParser()
titleExtractor = TitleExtractor(nodes=5, llm=documentTitle)
qaExtractor = QuestionsAnsweredExtractor(questions=3, llm=answerFinder)
entity = EntityExtractor(prediction_threshold=0.75, label_entities=True)
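    # Run the splitter and the metadata extractors as a single ingestion pipeline over the documents.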
return IngestionPipeline(transformations=[splitter, titleExtractor, qaExtractor, entity]).run(documents=documents)
print("> Loading Index from Neo4j")
context, graph = loadIndex()
print("> Loading Web Pages")
file.write(f'**Target:** {TARGET}\n\n')
file.write("""
---
## Loading Documents
**Details on the documents loaded into the system**
""")
loadWebsitesIntoGraph(TARGET, context=context)
# queryEngine = ComposableGraphQueryEngine(graph=graph)
file.write("## Web Page Loading Result\n\n")
file.write("| Item | Count |\n")
file.write("| :-: | :-: |\n")
file.write("| Documents | {} |\n".format(len(context.docstore.docs)))
file.write("| Failed Downloads | {} |\n\n".format(len(failedLinks)))
file.write("**Failed Links**\n".format(len(failedLinks)))
file.write("```json\n[\n".format(len(failedLinks)))
for badLink in failedLinks:
file.write(" \"{}\",".format(badLink))
file.write("]\n```\n\n---\n".format(len(failedLinks)))
exit()
# 2. Create Graph Index & Store
print("> Process Documents")
storageContext, index = getDocumentStore(docs)
# 3. Make available
queryEngine = index.as_query_engine(
verbose=True,
response_mode="tree_summarize"
)
print("> Querying Data")
file.write("## Query Data\n")
questionA = "What saving throws is a Fighter proficient using?"
questionB = "How many attack dice is a greatsword?"
questionC = "Which spell does the most lighting damage regardless of class?"
file.write("Each question and it's evaluated answer\n\n")
print("==> Question A")
responseA = queryEngine.query(questionA)
file.write("```\nQuestion: {}\n\nAnswer:\n{}\n```\n\n".format(questionA, responseA))
print("==> Question B")
responseB = queryEngine.query(questionB)
file.write("```\nQuestion: {}\n\nAnswer:\n{}\n```\n\n".format(questionB, responseB))
print("==> Question C")
responseC = queryEngine.query(questionC)
file.write("```\nQuestion: {}\n\nAnswer:\n{}\n```\n\n".format(questionC, responseC))
print(" == Complete == ")
# 4. Create Flask Server | [
"llama_index.core.KnowledgeGraphIndex.build_index_from_nodes",
"llama_index.core.StorageContext.from_defaults",
"llama_index.core.extractors.QuestionsAnsweredExtractor",
"llama_index.core.ingestion.IngestionPipeline",
"llama_index.core.load_graph_from_storage",
"llama_index.core.Document",
"llama_index.core.storage.docstore.SimpleDocumentStore",
"llama_index.core.node_parser.MarkdownNodeParser",
"llama_index.core.extractors.TitleExtractor",
"llama_index.extractors.entity.EntityExtractor"
] | [((63, 83), 'dotenv.load_dotenv', 'dotenv.load_dotenv', ([], {}), '()\n', (81, 83), False, 'import dotenv\n'), ((1601, 1652), 'src.file_utils.createOutputFile', 'createOutputFile', (['"""./kg-output"""', '"""srd-graph-result"""'], {}), "('./kg-output', 'srd-graph-result')\n", (1617, 1652), False, 'from src.file_utils import createOutputFile\n'), ((1222, 1329), 'llama_index.core.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {'graph_store': 'graphStore', 'vector_store': 'vectorStore', 'docstore': 'documentStore'}), '(graph_store=graphStore, vector_store=\n vectorStore, docstore=documentStore)\n', (1250, 1329), False, 'from llama_index.core import Document, KnowledgeGraphIndex, StorageContext, VectorStoreIndex, load_index_from_storage, load_graph_from_storage\n'), ((1335, 1375), 'llama_index.core.load_graph_from_storage', 'load_graph_from_storage', (['context', '"""root"""'], {}), "(context, 'root')\n", (1358, 1375), False, 'from llama_index.core import Document, KnowledgeGraphIndex, StorageContext, VectorStoreIndex, load_index_from_storage, load_graph_from_storage\n'), ((2191, 2218), 'src.documents.parseWebPage', 'parseWebPage', (['document.text'], {}), '(document.text)\n', (2203, 2218), False, 'from src.documents import loadWebPages, parseWebPage, getLinks\n'), ((2399, 2413), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (2411, 2413), False, 'from datetime import datetime\n'), ((2543, 2634), 'llama_index.core.Document', 'Document', ([], {'text': 'htmlDoc.text', 'id_': 'url', 'metadata': "{'source': url, 'createdAt': createDate}"}), "(text=htmlDoc.text, id_=url, metadata={'source': url, 'createdAt':\n createDate})\n", (2551, 2634), False, 'from llama_index.core import Document, KnowledgeGraphIndex, StorageContext, VectorStoreIndex, load_index_from_storage, load_graph_from_storage\n'), ((2917, 2961), 'llama_index.core.KnowledgeGraphIndex.build_index_from_nodes', 'KnowledgeGraphIndex.build_index_from_nodes', ([], {}), '()\n', (2959, 2961), False, 'from llama_index.core import Document, KnowledgeGraphIndex, StorageContext, VectorStoreIndex, load_index_from_storage, load_graph_from_storage\n'), ((3760, 3779), 'os.path.isdir', 'path.isdir', (['STORAGE'], {}), '(STORAGE)\n', (3770, 3779), False, 'from os import path\n'), ((5770, 5790), 'llama_index.core.node_parser.MarkdownNodeParser', 'MarkdownNodeParser', ([], {}), '()\n', (5788, 5790), False, 'from llama_index.core.node_parser import TokenTextSplitter, MarkdownNodeParser\n'), ((5811, 5853), 'llama_index.core.extractors.TitleExtractor', 'TitleExtractor', ([], {'nodes': '(5)', 'llm': 'documentTitle'}), '(nodes=5, llm=documentTitle)\n', (5825, 5853), False, 'from llama_index.core.extractors import TitleExtractor, QuestionsAnsweredExtractor\n'), ((5870, 5927), 'llama_index.core.extractors.QuestionsAnsweredExtractor', 'QuestionsAnsweredExtractor', ([], {'questions': '(3)', 'llm': 'answerFinder'}), '(questions=3, llm=answerFinder)\n', (5896, 5927), False, 'from llama_index.core.extractors import TitleExtractor, QuestionsAnsweredExtractor\n'), ((5939, 6002), 'llama_index.extractors.entity.EntityExtractor', 'EntityExtractor', ([], {'prediction_threshold': '(0.75)', 'label_entities': '(True)'}), '(prediction_threshold=0.75, label_entities=True)\n', (5954, 6002), False, 'from llama_index.extractors.entity import EntityExtractor\n'), ((3864, 3913), 'llama_index.core.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {'persist_dir': 'STORAGE'}), '(persist_dir=STORAGE)\n', (3892, 3913), False, 'from 
llama_index.core import Document, KnowledgeGraphIndex, StorageContext, VectorStoreIndex, load_index_from_storage, load_graph_from_storage\n'), ((4207, 4228), 'llama_index.core.storage.docstore.SimpleDocumentStore', 'SimpleDocumentStore', ([], {}), '()\n', (4226, 4228), False, 'from llama_index.core.storage.docstore import SimpleDocumentStore\n'), ((4288, 4335), 'llama_index.core.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {'docstore': 'docStore'}), '(docstore=docStore)\n', (4316, 4335), False, 'from llama_index.core import Document, KnowledgeGraphIndex, StorageContext, VectorStoreIndex, load_index_from_storage, load_graph_from_storage\n'), ((4947, 4966), 'src.documents.loadWebPages', 'loadWebPages', (['[url]'], {}), '([url])\n', (4959, 4966), False, 'from src.documents import loadWebPages, parseWebPage, getLinks\n'), ((5035, 5064), 'src.documents.parseWebPage', 'parseWebPage', (['htmlDoc[0].text'], {}), '(htmlDoc[0].text)\n', (5047, 5064), False, 'from src.documents import loadWebPages, parseWebPage, getLinks\n'), ((5255, 5289), 'src.documents.getLinks', 'getLinks', (['htmlDoc[0].text'], {'url': 'url'}), '(htmlDoc[0].text, url=url)\n', (5263, 5289), False, 'from src.documents import loadWebPages, parseWebPage, getLinks\n'), ((1841, 1860), 'src.documents.loadWebPages', 'loadWebPages', (['[url]'], {}), '([url])\n', (1853, 1860), False, 'from src.documents import loadWebPages, parseWebPage, getLinks\n'), ((3052, 3073), 'src.llm.get_service_context', 'get_service_context', ([], {}), '()\n', (3071, 3073), False, 'from src.llm import documentTitle, answerFinder, get_service_context\n'), ((5145, 5179), 'llama_index.core.Document', 'Document', ([], {'text': 'webPage', 'doc_id': 'url'}), '(text=webPage, doc_id=url)\n', (5153, 5179), False, 'from llama_index.core import Document, KnowledgeGraphIndex, StorageContext, VectorStoreIndex, load_index_from_storage, load_graph_from_storage\n'), ((6013, 6099), 'llama_index.core.ingestion.IngestionPipeline', 'IngestionPipeline', ([], {'transformations': '[splitter, titleExtractor, qaExtractor, entity]'}), '(transformations=[splitter, titleExtractor, qaExtractor,\n entity])\n', (6030, 6099), False, 'from llama_index.core.ingestion import IngestionPipeline\n'), ((2058, 2095), 'json.dumps', 'json.dumps', (['documentDetails'], {'indent': '(2)'}), '(documentDetails, indent=2)\n', (2068, 2095), False, 'import json\n'), ((3975, 3996), 'src.llm.get_service_context', 'get_service_context', ([], {}), '()\n', (3994, 3996), False, 'from src.llm import documentTitle, answerFinder, get_service_context\n'), ((4543, 4564), 'src.llm.get_service_context', 'get_service_context', ([], {}), '()\n', (4562, 4564), False, 'from src.llm import documentTitle, answerFinder, get_service_context\n')] |
import streamlit as st
from llama_index.core import VectorStoreIndex, StorageContext
from llama_index.core.agent import ReActAgent, AgentRunner, ReActChatFormatter
from llama_index.core.retrievers import AutoMergingRetriever
from llama_index.llms.openai import OpenAI
from llama_index.core.tools import FunctionTool
from llama_index.postprocessor.cohere_rerank import CohereRerank
from agent.knowledge_base.retriever.mars import MarsKnowledgeBaseRetriever
from rag import weaviate_utils, mongodb_utils
# HyDE
HYDE_LLM_TEMPERATURE: float = 0.2
HYDE_LLM_MODEL: str = "gpt-3.5-turbo-0125"
# similarity search
WEAVIATE_CLASS_NAME: str = "AutoMergingDocsChunk"
RETRIEVER_HYBRID_SEARCH_ALPHA: float = 0.85 # 1 => vector search; 0 => BM25
RETRIEVER_SIMILARITY_TOP_K: int = 15
# auto-merging retrieval
AUTO_MERGING_RATION_THRESHOLD: float = 0.2
# reranking
RERANK_TOP_N: int = 3
RERANK_MODEL: str = "rerank-english-v2.0"
@st.cache_resource
def get_knowledge_base_retriever():
# HyDE
hyde_llm = OpenAI(
model=HYDE_LLM_MODEL,
temperature=HYDE_LLM_TEMPERATURE,
)
# Weaviate
weaviate_client = weaviate_utils.get_weaviate_client()
weaviate_vector_store = weaviate_utils.as_vector_store(weaviate_client, WEAVIATE_CLASS_NAME)
weaviate_index = VectorStoreIndex.from_vector_store(weaviate_vector_store)
weaviate_retriever = weaviate_index.as_retriever(
similarity_top_k=RETRIEVER_SIMILARITY_TOP_K,
vector_store_query_mode="hybrid",
alpha=RETRIEVER_HYBRID_SEARCH_ALPHA,
)
# MongoDB
mongodb_client = mongodb_utils.get_client()
mongodb_docstore = mongodb_utils.as_docstore(mongodb_client)
mongodb_storage_context = StorageContext.from_defaults(docstore=mongodb_docstore)
# auto-merging retriever
auto_merging_retriever = AutoMergingRetriever(
simple_ratio_thresh=AUTO_MERGING_RATION_THRESHOLD,
vector_retriever=weaviate_retriever,
storage_context=mongodb_storage_context,
verbose=True,
)
# reranker
reranker = CohereRerank(
top_n=RERANK_TOP_N,
model=RERANK_MODEL,
)
return MarsKnowledgeBaseRetriever.from_defaults(
hyde_llm=hyde_llm,
reranker=reranker,
retriever=auto_merging_retriever,
)
REACT_SYSTEM_HEADER = """\
You are an AI assistant called MARS that is designed to help the astronaut crew on the Aegis Athena spaceflight mission.
You are currently talking to the astronaut Wade, who is currently in the SPACECRAFT module.
Wade can only interact with the SPACECRAFT module via the ship's console.
Always start by formulating a query for retrieving relevant information from the knowledge base. This is a `Thought`. Do NOT do this: `Thought: (Implicit) I can answer without any more tools!`
Then select the knowledge_base (`Action`) and provide your query as input (`Action Input`).
Please use a valid JSON format for the Action Input. Do NOT do this {{'query': 'What commands are available?'}}.
Finally, answer the user's query using the context provided by the knowledge_base.
Answer the user's query ONLY using context provided by the knowledge base and not prior knowledge.
## Example
### Conversation
User: What commands can be used to get an overview of the ship's status?
Assistant: You can use the `list` command to list all systems (using `list systems`), along with their status info.
User: Are there other things that can be listed using this command?
### Output
Thought: How do you use the `list` command?
Action: knowledge_base
Action Input: {{"query": "How do you use the `list` command?"}}
Observation: You can use the `list` command in one of two ways. Using `list systems`, which will list all systems along with status info, or using `list parts`, which will list all parts, along with their corresponding part ID and status info.
Answer: Yes, you can also use `list parts` to list all parts, along with their corresponding part ID and status info.
## Current Conversation
Below is the current conversation consisting of interleaving human and assistant messages.
"""
AGENT_LLM_MODEL: str = "gpt-3.5-turbo-0125"
# AGENT_LLM_MODEL: str = "gpt-3.5-turbo-0613"
# AGENT_LLM_MODEL: str = "gpt-4-0125-preview"
AGENT_LLM_TEMPERATURE: float = 0.1
def build_agent() -> AgentRunner:
knowledge_base_retriever = get_knowledge_base_retriever()
knowledge_base_tool = FunctionTool.from_defaults(
fn=knowledge_base_retriever.retrieve_formatted,
name="knowledge_base",
description="Provides information about the Aegis Athena spaceflight mission, "
"the S.P.A.C.E.C.R.A.F.T. (command/service) module "
"and the A.P.O.L.L.O. (lunar lander) module. "
"Can be used to gather information about systems or parts."
"Use a question as input to the tool."
)
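    # Wrap the knowledge-base tool in a ReAct agent that uses the mission-specific system header above.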
llm = OpenAI(model=AGENT_LLM_MODEL, temperature=AGENT_LLM_TEMPERATURE)
agent = ReActAgent.from_tools(
tools=[knowledge_base_tool],
llm=llm,
max_iterations=10,
verbose=True,
react_chat_formatter=ReActChatFormatter.from_defaults(
system_header=REACT_SYSTEM_HEADER,
)
)
return agent
| [
"llama_index.llms.openai.OpenAI",
"llama_index.core.VectorStoreIndex.from_vector_store",
"llama_index.core.StorageContext.from_defaults",
"llama_index.core.agent.ReActChatFormatter.from_defaults",
"llama_index.core.retrievers.AutoMergingRetriever",
"llama_index.core.tools.FunctionTool.from_defaults",
"llama_index.postprocessor.cohere_rerank.CohereRerank"
] | [((1003, 1065), 'llama_index.llms.openai.OpenAI', 'OpenAI', ([], {'model': 'HYDE_LLM_MODEL', 'temperature': 'HYDE_LLM_TEMPERATURE'}), '(model=HYDE_LLM_MODEL, temperature=HYDE_LLM_TEMPERATURE)\n', (1009, 1065), False, 'from llama_index.llms.openai import OpenAI\n'), ((1127, 1163), 'rag.weaviate_utils.get_weaviate_client', 'weaviate_utils.get_weaviate_client', ([], {}), '()\n', (1161, 1163), False, 'from rag import weaviate_utils, mongodb_utils\n'), ((1192, 1260), 'rag.weaviate_utils.as_vector_store', 'weaviate_utils.as_vector_store', (['weaviate_client', 'WEAVIATE_CLASS_NAME'], {}), '(weaviate_client, WEAVIATE_CLASS_NAME)\n', (1222, 1260), False, 'from rag import weaviate_utils, mongodb_utils\n'), ((1282, 1339), 'llama_index.core.VectorStoreIndex.from_vector_store', 'VectorStoreIndex.from_vector_store', (['weaviate_vector_store'], {}), '(weaviate_vector_store)\n', (1316, 1339), False, 'from llama_index.core import VectorStoreIndex, StorageContext\n'), ((1576, 1602), 'rag.mongodb_utils.get_client', 'mongodb_utils.get_client', ([], {}), '()\n', (1600, 1602), False, 'from rag import weaviate_utils, mongodb_utils\n'), ((1626, 1667), 'rag.mongodb_utils.as_docstore', 'mongodb_utils.as_docstore', (['mongodb_client'], {}), '(mongodb_client)\n', (1651, 1667), False, 'from rag import weaviate_utils, mongodb_utils\n'), ((1698, 1753), 'llama_index.core.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {'docstore': 'mongodb_docstore'}), '(docstore=mongodb_docstore)\n', (1726, 1753), False, 'from llama_index.core import VectorStoreIndex, StorageContext\n'), ((1813, 1985), 'llama_index.core.retrievers.AutoMergingRetriever', 'AutoMergingRetriever', ([], {'simple_ratio_thresh': 'AUTO_MERGING_RATION_THRESHOLD', 'vector_retriever': 'weaviate_retriever', 'storage_context': 'mongodb_storage_context', 'verbose': '(True)'}), '(simple_ratio_thresh=AUTO_MERGING_RATION_THRESHOLD,\n vector_retriever=weaviate_retriever, storage_context=\n mongodb_storage_context, verbose=True)\n', (1833, 1985), False, 'from llama_index.core.retrievers import AutoMergingRetriever\n'), ((2047, 2099), 'llama_index.postprocessor.cohere_rerank.CohereRerank', 'CohereRerank', ([], {'top_n': 'RERANK_TOP_N', 'model': 'RERANK_MODEL'}), '(top_n=RERANK_TOP_N, model=RERANK_MODEL)\n', (2059, 2099), False, 'from llama_index.postprocessor.cohere_rerank import CohereRerank\n'), ((2135, 2252), 'agent.knowledge_base.retriever.mars.MarsKnowledgeBaseRetriever.from_defaults', 'MarsKnowledgeBaseRetriever.from_defaults', ([], {'hyde_llm': 'hyde_llm', 'reranker': 'reranker', 'retriever': 'auto_merging_retriever'}), '(hyde_llm=hyde_llm, reranker=\n reranker, retriever=auto_merging_retriever)\n', (2175, 2252), False, 'from agent.knowledge_base.retriever.mars import MarsKnowledgeBaseRetriever\n'), ((4389, 4768), 'llama_index.core.tools.FunctionTool.from_defaults', 'FunctionTool.from_defaults', ([], {'fn': 'knowledge_base_retriever.retrieve_formatted', 'name': '"""knowledge_base"""', 'description': '"""Provides information about the Aegis Athena spaceflight mission, the S.P.A.C.E.C.R.A.F.T. (command/service) module and the A.P.O.L.L.O. (lunar lander) module. Can be used to gather information about systems or parts.Use a question as input to the tool."""'}), "(fn=knowledge_base_retriever.retrieve_formatted,\n name='knowledge_base', description=\n 'Provides information about the Aegis Athena spaceflight mission, the S.P.A.C.E.C.R.A.F.T. (command/service) module and the A.P.O.L.L.O. (lunar lander) module. 
Can be used to gather information about systems or parts.Use a question as input to the tool.'\n )\n", (4415, 4768), False, 'from llama_index.core.tools import FunctionTool\n'), ((4888, 4952), 'llama_index.llms.openai.OpenAI', 'OpenAI', ([], {'model': 'AGENT_LLM_MODEL', 'temperature': 'AGENT_LLM_TEMPERATURE'}), '(model=AGENT_LLM_MODEL, temperature=AGENT_LLM_TEMPERATURE)\n', (4894, 4952), False, 'from llama_index.llms.openai import OpenAI\n'), ((5120, 5187), 'llama_index.core.agent.ReActChatFormatter.from_defaults', 'ReActChatFormatter.from_defaults', ([], {'system_header': 'REACT_SYSTEM_HEADER'}), '(system_header=REACT_SYSTEM_HEADER)\n', (5152, 5187), False, 'from llama_index.core.agent import ReActAgent, AgentRunner, ReActChatFormatter\n')] |
"""
Module for setting up a Streamlit application that searches and summarizes content
from the Huberman Lab Podcast. Uses llama_index for data indexing and retrieval, and
OpenAI for text embedding and generation.
"""
import streamlit as st
import openai
from llama_index.core import (
StorageContext,
load_index_from_storage,
get_response_synthesizer,
Settings
)
from llama_index.core.retrievers import VectorIndexRetriever
from llama_index.core.query_engine import RetrieverQueryEngine
from llama_index.core.postprocessor import SimilarityPostprocessor
from llama_index.llms.openai import OpenAI
from llama_index.embeddings.openai import OpenAIEmbedding
# Configuration of the Streamlit page
st.set_page_config(
page_title="TLDHubeR",
page_icon="🧠",
layout="centered",
initial_sidebar_state="auto",
menu_items=None
)
# Markdown file path
MARKDOWN_FILE_PATH = 'docs/tldhuber_side_page.md'
def read_markdown_file(path):
"""
Reads the content of a markdown file and returns it.
Parameters:
path (str): The path to the markdown file.
Returns:
str: The content of the markdown file.
"""
with open(path, 'r', encoding='utf-8') as file:
return file.read()
# Displaying the content in the sidebar
with st.sidebar:
openai_api_key = st.text_input("OpenAI API Key", key="chatbot_api_key", type="password")
st.markdown("[Get an OpenAI API key](https://platform.openai.com/account/api-keys)")
st.markdown("[View the source code](https://github.com/apeled/TLDhubeR)")
st.markdown(read_markdown_file(MARKDOWN_FILE_PATH), unsafe_allow_html=True)
openai.api_key = openai_api_key
st.title("TLDHubeR: Search and Summarize the Huberman Lab")
st.info("Hint: Are you a Hubernoob? If so, try searching for sleep!")
if "messages" not in st.session_state:
st.session_state["messages"] = [
{"role": "assistant", "content": "Ask me a question about my podcasts."}
]
def clear_session_state():
"""
Clears the Streamlit session state.
"""
for key in list(st.session_state.keys()):
del st.session_state[key]
@st.cache_resource(show_spinner=False)
def load_data():
"""
    Loads and indexes the Huberman Lab Podcast data, initializing the LLM and
    text-embedding settings used for retrieval and response generation.
Returns:
VectorStoreIndex: The loaded and indexed podcast data.
"""
with st.spinner("Loading and indexing the Huberman Lab Podcast!"):
Settings.llm = OpenAI(temperature=0.2, model="gpt-3.5-turbo-0125")
Settings.embed_model = OpenAIEmbedding(model="text-embedding-3-small")
storage_context_load = StorageContext.from_defaults(persist_dir="data")
loaded_index = load_index_from_storage(storage_context_load)
return loaded_index
def set_up_engine(loaded_index):
"""
Creates a retriever and query engine using the loaded index.
Parameters:
loaded_index (VectorStoreIndex): The loaded and indexed podcast data.
Returns:
RetrieverQueryEngine: The assembled query engine.
"""
retriever = VectorIndexRetriever(index=loaded_index, similarity_top_k=10)
response_synthesizer = get_response_synthesizer(response_mode="no_text")
simple_hube_engine = RetrieverQueryEngine.from_args(
retriever=retriever,
response_synthesizer=response_synthesizer,
node_postprocessors=[SimilarityPostprocessor(similarity_cutoff=0.25)]
)
return simple_hube_engine
def get_mid_video_link(link, time_stamp):
"""
Modifies a YouTube link to start at a specified time.
Parameters:
link (str): The original YouTube video link.
        time_stamp (int): The start time in seconds.
Returns:
str: The modified YouTube link with the start time parameter.
"""
base_url = link.replace("www.youtube.com/watch?v=", "youtu.be/")
return f"{base_url}?t={time_stamp}"
def extract_metadata(query_response):
"""
Extracts and transforms metadata from source nodes in a query response.
Parameters:
query_response (QueryResponse): The response from a query engine.
Returns:
list[dict]: A list of transformed metadata dictionaries with modified YouTube links.
"""
metadata_list = [node.metadata for node in query_response.source_nodes]
for metadata in metadata_list:
base_link = metadata["youtube_link"]
start_time = metadata["timestamp"]
metadata["youtube_link"] = get_mid_video_link(base_link, start_time)
return metadata_list
# Main application logic
try:
if openai.api_key:
index = load_data()
engine = set_up_engine(index)
if "chat_engine" not in st.session_state:
st.session_state["chat_engine"] = index.as_chat_engine(
chat_mode="context",
system_prompt="""Respond as if you are Andrew Huberman. You should answer by
summarizing the topic from your context. Always
include a direct quote from your podcast related to
the response."""
)
if prompt := st.chat_input("Search Query"):
st.session_state["messages"].append({"role": "user", "content": prompt})
vector_response = engine.query(prompt)
meta_data = extract_metadata(vector_response)
youtube_links = [episode['youtube_link'] for episode in meta_data]
timestamps = [episode['timestamp'] for episode in meta_data]
for i, message in enumerate(st.session_state["messages"]):
with st.chat_message(message["role"], avatar="docs/andrew.jpeg" if i == 0 else None):
st.write(message["content"])
if st.session_state["messages"][-1]["role"] != "assistant":
with st.chat_message("assistant", avatar="docs/andrew.jpeg"):
with st.spinner("Thinking..."):
response = st.session_state["chat_engine"].chat(prompt)
st.write(response.response)
message = {"role": "assistant", "content": response.response}
st.session_state["messages"].append(message)
st.video(youtube_links[0], start_time=timestamps[0])
with st.expander("See additional clips"):
unique_youtube_links = set(youtube_links[1:])
for episode in unique_youtube_links:
st.write(episode)
# Button to clear the session state
if st.button("Clear Chat History"):
clear_session_state()
except ValueError as e:
if openai.api_key:
st.error(f"An error occurred: {e}. Please check your OpenAPI key and try again.")
else:
st.warning("Enter your OpenAPI key in the sidebar.")
| [
"llama_index.llms.openai.OpenAI",
"llama_index.core.retrievers.VectorIndexRetriever",
"llama_index.core.StorageContext.from_defaults",
"llama_index.core.load_index_from_storage",
"llama_index.core.get_response_synthesizer",
"llama_index.core.postprocessor.SimilarityPostprocessor",
"llama_index.embeddings.openai.OpenAIEmbedding"
] | [((712, 838), 'streamlit.set_page_config', 'st.set_page_config', ([], {'page_title': '"""TLDHubeR"""', 'page_icon': '"""🧠"""', 'layout': '"""centered"""', 'initial_sidebar_state': '"""auto"""', 'menu_items': 'None'}), "(page_title='TLDHubeR', page_icon='🧠', layout='centered',\n initial_sidebar_state='auto', menu_items=None)\n", (730, 838), True, 'import streamlit as st\n'), ((1684, 1743), 'streamlit.title', 'st.title', (['"""TLDHubeR: Search and Summarize the Huberman Lab"""'], {}), "('TLDHubeR: Search and Summarize the Huberman Lab')\n", (1692, 1743), True, 'import streamlit as st\n'), ((1744, 1813), 'streamlit.info', 'st.info', (['"""Hint: Are you a Hubernoob? If so, try searching for sleep!"""'], {}), "('Hint: Are you a Hubernoob? If so, try searching for sleep!')\n", (1751, 1813), True, 'import streamlit as st\n'), ((2144, 2181), 'streamlit.cache_resource', 'st.cache_resource', ([], {'show_spinner': '(False)'}), '(show_spinner=False)\n', (2161, 2181), True, 'import streamlit as st\n'), ((1332, 1403), 'streamlit.text_input', 'st.text_input', (['"""OpenAI API Key"""'], {'key': '"""chatbot_api_key"""', 'type': '"""password"""'}), "('OpenAI API Key', key='chatbot_api_key', type='password')\n", (1345, 1403), True, 'import streamlit as st\n'), ((1408, 1497), 'streamlit.markdown', 'st.markdown', (['"""[Get an OpenAI API key](https://platform.openai.com/account/api-keys)"""'], {}), "(\n '[Get an OpenAI API key](https://platform.openai.com/account/api-keys)')\n", (1419, 1497), True, 'import streamlit as st\n'), ((1497, 1570), 'streamlit.markdown', 'st.markdown', (['"""[View the source code](https://github.com/apeled/TLDhubeR)"""'], {}), "('[View the source code](https://github.com/apeled/TLDhubeR)')\n", (1508, 1570), True, 'import streamlit as st\n'), ((3132, 3193), 'llama_index.core.retrievers.VectorIndexRetriever', 'VectorIndexRetriever', ([], {'index': 'loaded_index', 'similarity_top_k': '(10)'}), '(index=loaded_index, similarity_top_k=10)\n', (3152, 3193), False, 'from llama_index.core.retrievers import VectorIndexRetriever\n'), ((3221, 3270), 'llama_index.core.get_response_synthesizer', 'get_response_synthesizer', ([], {'response_mode': '"""no_text"""'}), "(response_mode='no_text')\n", (3245, 3270), False, 'from llama_index.core import StorageContext, load_index_from_storage, get_response_synthesizer, Settings\n'), ((2082, 2105), 'streamlit.session_state.keys', 'st.session_state.keys', ([], {}), '()\n', (2103, 2105), True, 'import streamlit as st\n'), ((2427, 2487), 'streamlit.spinner', 'st.spinner', (['"""Loading and indexing the Huberman Lab Podcast!"""'], {}), "('Loading and indexing the Huberman Lab Podcast!')\n", (2437, 2487), True, 'import streamlit as st\n'), ((2512, 2563), 'llama_index.llms.openai.OpenAI', 'OpenAI', ([], {'temperature': '(0.2)', 'model': '"""gpt-3.5-turbo-0125"""'}), "(temperature=0.2, model='gpt-3.5-turbo-0125')\n", (2518, 2563), False, 'from llama_index.llms.openai import OpenAI\n'), ((2595, 2642), 'llama_index.embeddings.openai.OpenAIEmbedding', 'OpenAIEmbedding', ([], {'model': '"""text-embedding-3-small"""'}), "(model='text-embedding-3-small')\n", (2610, 2642), False, 'from llama_index.embeddings.openai import OpenAIEmbedding\n'), ((2675, 2723), 'llama_index.core.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {'persist_dir': '"""data"""'}), "(persist_dir='data')\n", (2703, 2723), False, 'from llama_index.core import StorageContext, load_index_from_storage, get_response_synthesizer, Settings\n'), ((2747, 2792), 
'llama_index.core.load_index_from_storage', 'load_index_from_storage', (['storage_context_load'], {}), '(storage_context_load)\n', (2770, 2792), False, 'from llama_index.core import StorageContext, load_index_from_storage, get_response_synthesizer, Settings\n'), ((6637, 6668), 'streamlit.button', 'st.button', (['"""Clear Chat History"""'], {}), "('Clear Chat History')\n", (6646, 6668), True, 'import streamlit as st\n'), ((5218, 5247), 'streamlit.chat_input', 'st.chat_input', (['"""Search Query"""'], {}), "('Search Query')\n", (5231, 5247), True, 'import streamlit as st\n'), ((6760, 6846), 'streamlit.error', 'st.error', (['f"""An error occurred: {e}. Please check your OpenAPI key and try again."""'], {}), "(\n f'An error occurred: {e}. Please check your OpenAPI key and try again.')\n", (6768, 6846), True, 'import streamlit as st\n'), ((6860, 6912), 'streamlit.warning', 'st.warning', (['"""Enter your OpenAPI key in the sidebar."""'], {}), "('Enter your OpenAPI key in the sidebar.')\n", (6870, 6912), True, 'import streamlit as st\n'), ((3438, 3485), 'llama_index.core.postprocessor.SimilarityPostprocessor', 'SimilarityPostprocessor', ([], {'similarity_cutoff': '(0.25)'}), '(similarity_cutoff=0.25)\n', (3461, 3485), False, 'from llama_index.core.postprocessor import SimilarityPostprocessor\n'), ((5680, 5759), 'streamlit.chat_message', 'st.chat_message', (["message['role']"], {'avatar': "('docs/andrew.jpeg' if i == 0 else None)"}), "(message['role'], avatar='docs/andrew.jpeg' if i == 0 else None)\n", (5695, 5759), True, 'import streamlit as st\n'), ((5777, 5805), 'streamlit.write', 'st.write', (["message['content']"], {}), "(message['content'])\n", (5785, 5805), True, 'import streamlit as st\n'), ((5892, 5947), 'streamlit.chat_message', 'st.chat_message', (['"""assistant"""'], {'avatar': '"""docs/andrew.jpeg"""'}), "('assistant', avatar='docs/andrew.jpeg')\n", (5907, 5947), True, 'import streamlit as st\n'), ((5970, 5995), 'streamlit.spinner', 'st.spinner', (['"""Thinking..."""'], {}), "('Thinking...')\n", (5980, 5995), True, 'import streamlit as st\n'), ((6093, 6120), 'streamlit.write', 'st.write', (['response.response'], {}), '(response.response)\n', (6101, 6120), True, 'import streamlit as st\n'), ((6288, 6340), 'streamlit.video', 'st.video', (['youtube_links[0]'], {'start_time': 'timestamps[0]'}), '(youtube_links[0], start_time=timestamps[0])\n', (6296, 6340), True, 'import streamlit as st\n'), ((6367, 6402), 'streamlit.expander', 'st.expander', (['"""See additional clips"""'], {}), "('See additional clips')\n", (6378, 6402), True, 'import streamlit as st\n'), ((6563, 6580), 'streamlit.write', 'st.write', (['episode'], {}), '(episode)\n', (6571, 6580), True, 'import streamlit as st\n')] |
#! coding: utf-8
import os
from typing import List
from llama_index import ServiceContext, StorageContext, VectorStoreIndex, SimpleDirectoryReader, TreeIndex
from llama_index.node_parser import SimpleNodeParser
from llama_index.schema import BaseNode
from llama_index.text_splitter import SentenceSplitter
from build_todo.download import download
from common.config import data_dir, index_dir
from common.llm import create_llm
from common.prompt import CH_SUMMARY_PROMPT
llm = create_llm(timeout=60)
service_context = ServiceContext.from_defaults(
llm=llm,
node_parser=SimpleNodeParser.from_defaults(text_splitter=SentenceSplitter(
chunk_size=1024,
chunk_overlap=200,
)),
)
def build_nodes(data_file: str) -> List[BaseNode]:
# TODO
    # data_file is a .txt file; use SimpleDirectoryReader together with service_context.node_parser to parse it into a List[BaseNode].
# https://docs.llamaindex.ai/en/stable/understanding/loading/loading.html#parsing-documents-into-nodes
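    # A possible solution is sketched below (kept commented out so the exercise
    # stays open; it assumes the reader/parser configured in service_context above):
    #
    #   documents = SimpleDirectoryReader(input_files=[data_file]).load_data()
    #   return service_context.node_parser.get_nodes_from_documents(documents)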
raise NotImplementedError
def build_index(index_file: str, data_file: str):
if os.path.exists(index_file):
return
nodes = build_nodes(data_file)
storage_context = StorageContext.from_defaults()
# TODO
    # Build VectorStoreIndex and TreeIndex indexes from the nodes and persist both through storage_context
# https://docs.llamaindex.ai/en/stable/understanding/indexing/indexing.html#using-vector-store-index
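    # A possible solution is sketched below (kept commented out so the exercise
    # stays open; the persist location is an assumption):
    #
    #   VectorStoreIndex(nodes, service_context=service_context,
    #                    storage_context=storage_context)
    #   TreeIndex(nodes, service_context=service_context,
    #             storage_context=storage_context)
    #   storage_context.persist(persist_dir=index_file)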
raise NotImplementedError
def download_and_build_index(title: str, data_dir: str, index_dir: str):
data_file = download(title, data_dir)
build_index(index_file=os.path.join(index_dir, os.path.relpath(data_file, data_dir)), data_file=data_file)
def build_all():
raise NotImplementedError
if __name__ == '__main__':
build_all()
| [
"llama_index.text_splitter.SentenceSplitter",
"llama_index.StorageContext.from_defaults"
] | [((482, 504), 'common.llm.create_llm', 'create_llm', ([], {'timeout': '(60)'}), '(timeout=60)\n', (492, 504), False, 'from common.llm import create_llm\n'), ((1072, 1098), 'os.path.exists', 'os.path.exists', (['index_file'], {}), '(index_file)\n', (1086, 1098), False, 'import os\n'), ((1172, 1202), 'llama_index.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {}), '()\n', (1200, 1202), False, 'from llama_index import ServiceContext, StorageContext, VectorStoreIndex, SimpleDirectoryReader, TreeIndex\n'), ((1513, 1538), 'build_todo.download.download', 'download', (['title', 'data_dir'], {}), '(title, data_dir)\n', (1521, 1538), False, 'from build_todo.download import download\n'), ((627, 679), 'llama_index.text_splitter.SentenceSplitter', 'SentenceSplitter', ([], {'chunk_size': '(1024)', 'chunk_overlap': '(200)'}), '(chunk_size=1024, chunk_overlap=200)\n', (643, 679), False, 'from llama_index.text_splitter import SentenceSplitter\n'), ((1590, 1626), 'os.path.relpath', 'os.path.relpath', (['data_file', 'data_dir'], {}), '(data_file, data_dir)\n', (1605, 1626), False, 'import os\n')] |
from llama_index.chat_engine.types import StreamingAgentChatResponse
import streamlit as st
from llama_index import SimpleDirectoryReader, VectorStoreIndex
# TODO: this caches the resource globally, not per-session;
# each user session should have its own index (a per-session sketch follows build_index below)
@st.cache_resource(show_spinner="Indexing documents...")
def build_index(files):
documents = SimpleDirectoryReader(input_files=files).load_data()
return VectorStoreIndex.from_documents(documents)
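# A minimal per-session variant (an illustrative sketch, not part of the original
# app): st.session_state is scoped to a single user session, unlike
# st.cache_resource, which is shared across all sessions.
def build_index_per_session(files):
    key = ("index",) + tuple(files)
    if "indexes" not in st.session_state:
        st.session_state["indexes"] = {}
    if key not in st.session_state["indexes"]:
        documents = SimpleDirectoryReader(input_files=files).load_data()
        st.session_state["indexes"][key] = VectorStoreIndex.from_documents(documents)
    return st.session_state["indexes"][key]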
def handle_stream(root, stream: StreamingAgentChatResponse):
text = ""
root.markdown("Thinking...")
for token in stream.response_gen:
text += token
root.markdown(text)
return text
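# Illustrative usage (hypothetical names and paths; assumes a chat engine built
# from the index returned by build_index above):
#
#   index = build_index(["docs/report.pdf"])
#   chat_engine = index.as_chat_engine(chat_mode="condense_question")
#   placeholder = st.empty()
#   answer = handle_stream(placeholder, chat_engine.stream_chat("Summarize the report"))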
| [
"llama_index.VectorStoreIndex.from_documents",
"llama_index.SimpleDirectoryReader"
] | [((270, 325), 'streamlit.cache_resource', 'st.cache_resource', ([], {'show_spinner': '"""Indexing documents..."""'}), "(show_spinner='Indexing documents...')\n", (287, 325), True, 'import streamlit as st\n'), ((430, 472), 'llama_index.VectorStoreIndex.from_documents', 'VectorStoreIndex.from_documents', (['documents'], {}), '(documents)\n', (461, 472), False, 'from llama_index import SimpleDirectoryReader, VectorStoreIndex\n'), ((366, 406), 'llama_index.SimpleDirectoryReader', 'SimpleDirectoryReader', ([], {'input_files': 'files'}), '(input_files=files)\n', (387, 406), False, 'from llama_index import SimpleDirectoryReader, VectorStoreIndex\n')] |
from pathlib import Path
from typing import Dict, Iterator, List, cast
from llama_index import ServiceContext, StorageContext
from llama_index.embeddings.cohereai import CohereEmbedding
from llama_index.indices.loading import load_index_from_storage
from llama_index.indices.vector_store import VectorStoreIndex
from llama_index.indices.vector_store.retrievers import VectorIndexRetriever
from llama_index.llms import ChatMessage, MessageRole, OpenAI
from llama_index.node_parser import SimpleNodeParser
from llama_index.response_synthesizers import ResponseMode, get_response_synthesizer
from llama_index.schema import NodeWithScore
from pydantic import BaseModel, Field
class ParentalLeaveStatuteQuery(BaseModel):
"""Represents a query for the parental leave statute."""
question: str = Field(description="The question to ask the parental leave statute.")
"""The question to ask the parental leave statute."""
situational_context: Dict[str, str] = Field(default_factory=dict)
"""User's situational context as key-value pairs.
The keys are the names of the situational context variables and the values are the
values of the situational context variables. The names are descriptions like
"arbejdsforhold" and "arbejdstimer" and the values are the actual values like
"lønmodtager" and "37 timer om ugen".
"""
class ParentalLeaveStatuteQAEngine:
"""Represents a question-answering engine for the parental leave statute."""
def __init__(
self,
index_dir: Path,
cohere_api_key: str,
openai_api_key: str,
llm_model_name: str = "gpt-4",
) -> None:
# TODO: Refactor this.
self._llm = OpenAI(
api_key=openai_api_key,
model=llm_model_name,
temperature=0.0,
)
self._messages = [
ChatMessage(
role=MessageRole.SYSTEM,
content=(
"Dit navn er Lærbar. Du er jura-professor, "
"der er ekspert i barselsloven. Du hjælper folk med "
"at forstå barselsloven og besvare spørgsmål om barselsloven. "
"Dine svar er baseret på tekst-fraser citeret direkte "
"fra barselsloven."
),
)
]
embed_model = CohereEmbedding(
cohere_api_key=cohere_api_key,
model_name="embed-multilingual-v3.0",
input_type="search_query",
)
node_parser: SimpleNodeParser = SimpleNodeParser.from_defaults(
chunk_size=512,
chunk_overlap=10,
)
self._service_context: ServiceContext = ServiceContext.from_defaults(
llm=None,
embed_model=embed_model,
node_parser=node_parser,
)
base_index = load_index_from_storage(
storage_context=StorageContext.from_defaults(persist_dir=str(index_dir)),
service_context=self._service_context,
)
self._vector_index: VectorStoreIndex = cast(VectorStoreIndex, base_index)
# Configure the response mode so the retriever only returns the nodes
# without sending the retreived nodes to an LLM.
# https://docs.llamaindex.ai/en/stable/module_guides/querying/response_synthesizers/root.html#configuring-the-response-mode
response_synthesizer = get_response_synthesizer(
response_mode=ResponseMode.NO_TEXT,
service_context=self._service_context,
)
base_retriever = self._vector_index.as_retriever(
service_context=self._service_context,
response_synthesizer=response_synthesizer,
)
self._retriever: VectorIndexRetriever = cast(
VectorIndexRetriever, base_retriever
)
def run(self, query: ParentalLeaveStatuteQuery) -> Iterator[str]:
query_for_retriever = self._build_query_for_retriever(query=query)
retrieved_nodes = self._retriever.retrieve(
str_or_query_bundle=query_for_retriever,
)
llm_prompt = self._build_llm_prompt(
query=query, retrieved_nodes=retrieved_nodes
)
print(llm_prompt)
for item in self._stream_llm_response(llm_prompt=llm_prompt):
yield item
yield "\n\n### Kilder\n\n"
for item in self._stream_retreived_nodes(retrieved_nodes=retrieved_nodes):
yield item
def _stream_retreived_nodes(
self, retrieved_nodes: List[NodeWithScore]
) -> Iterator[str]:
for source_node in retrieved_nodes:
# source_text_fmt = source_node.node.get_content(metadata_mode=MetadataMode.ALL).strip()
reference = source_node.node.metadata["Reference"]
chapter_no = source_node.node.metadata["Kapitel nummer"]
chapter_title = source_node.node.metadata["Kapitel overskrift"]
is_paragraph = source_node.node.metadata.get("Type", "") == "Paragraf"
short_guid = source_node.node_id.split("-")[0]
yield f"**Kapitel {chapter_no}: {chapter_title}."
if is_paragraph:
yield f" Paragraf: {reference}"
else:
yield f" {reference}"
yield f"** [{short_guid}]\n\n"
yield f"{source_node.node.get_content().strip()}\n\n"
def _stream_llm_response(self, llm_prompt: str) -> Iterator[str]:
"""Query the LLM and stream the response.
Args:
llm_prompt (str): The prompt for the LLM.
Yields:
Iterator[str]: The response from the LLM.
"""
self._messages.append(
ChatMessage(
role=MessageRole.USER,
content=llm_prompt,
)
)
llm_completion_resp = self._llm.stream_chat(
messages=self._messages,
)
full_response = ""
for chunk in llm_completion_resp:
chunk_text = chunk.delta
full_response += chunk_text
yield chunk_text
self._messages.append(
ChatMessage(
role=MessageRole.ASSISTANT,
content=full_response,
)
)
print(f"Full response:\n\n{full_response}")
def _build_llm_prompt(
self, query: ParentalLeaveStatuteQuery, retrieved_nodes: List[NodeWithScore]
) -> str:
"""Build the prompt for the query."""
prompt = ""
prompt += "Du får et spørgsmål fra en person, hvis situation ser sådan ud:\n\n"
for key, value in query.situational_context.items():
prompt += f" - {key}: {value}\n"
prompt += "\n"
prompt += "Personen stiller flg. spørgsmål:\n\n"
prompt += f"{query.question}\n\n"
if len(retrieved_nodes) > 0:
prompt += "## Kilder\n\n"
prompt += "Et opslag i barselsloven giver flg. tekster.\n\n"
for source_node in retrieved_nodes:
reference = source_node.node.metadata["Reference"]
chapter_no = source_node.node.metadata["Kapitel nummer"]
chapter_title = source_node.node.metadata["Kapitel overskrift"]
is_paragraph = source_node.node.metadata.get("Type", "") == "Paragraf"
short_guid = source_node.node_id.split("-")[0]
source_text = (
f"### [{short_guid}] Kapitel {chapter_no}: {chapter_title}."
)
if is_paragraph:
source_text += f" Paragraf: {reference}"
else:
source_text += f" {reference}"
prompt += f"{source_text}\n\n"
prompt += f"{source_node.node.get_content().strip()}\n\n"
prompt += "Din opgave er bevare konteksten fra spørgsmålet og svare på spørgsmålet med en kort tekst. "
prompt += "Dit svar skal altid inkludere en eller flere referencer fra Kilder-sektionen.\n"
return prompt
def _build_query_for_retriever(self, query: ParentalLeaveStatuteQuery) -> str:
"""Build the query for the retriever.
The query is the question with the situational context as a prefix.
Args:
query (ParentalLeaveStatuteQuery): The query.
Returns:
str: The query for the retriever.
"""
question_with_context = ""
if len(query.situational_context) > 0:
question_with_context += "Min nuværende situtation er:\n"
for key, value in query.situational_context.items():
question_with_context += f" - {key}: {value}\n"
question_with_context += "\n"
question_with_context += "Mit spørgsmål er:\n"
question_with_context += query.question
return question_with_context
| [
"llama_index.embeddings.cohereai.CohereEmbedding",
"llama_index.response_synthesizers.get_response_synthesizer",
"llama_index.ServiceContext.from_defaults",
"llama_index.llms.OpenAI",
"llama_index.llms.ChatMessage",
"llama_index.node_parser.SimpleNodeParser.from_defaults"
] | [((801, 869), 'pydantic.Field', 'Field', ([], {'description': '"""The question to ask the parental leave statute."""'}), "(description='The question to ask the parental leave statute.')\n", (806, 869), False, 'from pydantic import BaseModel, Field\n'), ((971, 998), 'pydantic.Field', 'Field', ([], {'default_factory': 'dict'}), '(default_factory=dict)\n', (976, 998), False, 'from pydantic import BaseModel, Field\n'), ((1694, 1763), 'llama_index.llms.OpenAI', 'OpenAI', ([], {'api_key': 'openai_api_key', 'model': 'llm_model_name', 'temperature': '(0.0)'}), '(api_key=openai_api_key, model=llm_model_name, temperature=0.0)\n', (1700, 1763), False, 'from llama_index.llms import ChatMessage, MessageRole, OpenAI\n'), ((2336, 2452), 'llama_index.embeddings.cohereai.CohereEmbedding', 'CohereEmbedding', ([], {'cohere_api_key': 'cohere_api_key', 'model_name': '"""embed-multilingual-v3.0"""', 'input_type': '"""search_query"""'}), "(cohere_api_key=cohere_api_key, model_name=\n 'embed-multilingual-v3.0', input_type='search_query')\n", (2351, 2452), False, 'from llama_index.embeddings.cohereai import CohereEmbedding\n'), ((2536, 2600), 'llama_index.node_parser.SimpleNodeParser.from_defaults', 'SimpleNodeParser.from_defaults', ([], {'chunk_size': '(512)', 'chunk_overlap': '(10)'}), '(chunk_size=512, chunk_overlap=10)\n', (2566, 2600), False, 'from llama_index.node_parser import SimpleNodeParser\n'), ((2685, 2778), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'llm': 'None', 'embed_model': 'embed_model', 'node_parser': 'node_parser'}), '(llm=None, embed_model=embed_model, node_parser\n =node_parser)\n', (2713, 2778), False, 'from llama_index import ServiceContext, StorageContext\n'), ((3063, 3097), 'typing.cast', 'cast', (['VectorStoreIndex', 'base_index'], {}), '(VectorStoreIndex, base_index)\n', (3067, 3097), False, 'from typing import Dict, Iterator, List, cast\n'), ((3397, 3500), 'llama_index.response_synthesizers.get_response_synthesizer', 'get_response_synthesizer', ([], {'response_mode': 'ResponseMode.NO_TEXT', 'service_context': 'self._service_context'}), '(response_mode=ResponseMode.NO_TEXT,\n service_context=self._service_context)\n', (3421, 3500), False, 'from llama_index.response_synthesizers import ResponseMode, get_response_synthesizer\n'), ((3756, 3798), 'typing.cast', 'cast', (['VectorIndexRetriever', 'base_retriever'], {}), '(VectorIndexRetriever, base_retriever)\n', (3760, 3798), False, 'from typing import Dict, Iterator, List, cast\n'), ((1851, 2133), 'llama_index.llms.ChatMessage', 'ChatMessage', ([], {'role': 'MessageRole.SYSTEM', 'content': '"""Dit navn er Lærbar. Du er jura-professor, der er ekspert i barselsloven. Du hjælper folk med at forstå barselsloven og besvare spørgsmål om barselsloven. Dine svar er baseret på tekst-fraser citeret direkte fra barselsloven."""'}), "(role=MessageRole.SYSTEM, content=\n 'Dit navn er Lærbar. Du er jura-professor, der er ekspert i barselsloven. Du hjælper folk med at forstå barselsloven og besvare spørgsmål om barselsloven. 
Dine svar er baseret på tekst-fraser citeret direkte fra barselsloven.'\n )\n", (1862, 2133), False, 'from llama_index.llms import ChatMessage, MessageRole, OpenAI\n'), ((5686, 5740), 'llama_index.llms.ChatMessage', 'ChatMessage', ([], {'role': 'MessageRole.USER', 'content': 'llm_prompt'}), '(role=MessageRole.USER, content=llm_prompt)\n', (5697, 5740), False, 'from llama_index.llms import ChatMessage, MessageRole, OpenAI\n'), ((6119, 6181), 'llama_index.llms.ChatMessage', 'ChatMessage', ([], {'role': 'MessageRole.ASSISTANT', 'content': 'full_response'}), '(role=MessageRole.ASSISTANT, content=full_response)\n', (6130, 6181), False, 'from llama_index.llms import ChatMessage, MessageRole, OpenAI\n')] |
import chromadb
from chromadb.config import Settings
from llama_index.vector_stores import ChromaVectorStore
from llama_index import VectorStoreIndex, SimpleDirectoryReader, ServiceContext
from langchain.embeddings.huggingface import HuggingFaceEmbeddings
from llama_index.embeddings import LangchainEmbedding
from node__parser import nodes
from web.app import Test
db2 = chromadb.PersistentClient("src/data")
collection = db2.get_collection(name="embedding_vector")
embed_model = LangchainEmbedding(
HuggingFaceEmbeddings(model_name="ai-forever/sbert_large_nlu_ru")
)
service_context = ServiceContext.from_defaults(embed_model=embed_model)
vector_store = ChromaVectorStore(chroma_collection=collection)
index = VectorStoreIndex.from_vector_store(
vector_store,
service_context=service_context,
)
index.insert_nodes(nodes)
query_engine = index.as_query_engine()
response = query_engine.query(Test)
| [
"llama_index.ServiceContext.from_defaults",
"llama_index.VectorStoreIndex.from_vector_store",
"llama_index.vector_stores.ChromaVectorStore"
] | [((384, 421), 'chromadb.PersistentClient', 'chromadb.PersistentClient', (['"""src/data"""'], {}), "('src/data')\n", (409, 421), False, 'import chromadb\n'), ((608, 661), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'embed_model': 'embed_model'}), '(embed_model=embed_model)\n', (636, 661), False, 'from llama_index import VectorStoreIndex, SimpleDirectoryReader, ServiceContext\n'), ((682, 729), 'llama_index.vector_stores.ChromaVectorStore', 'ChromaVectorStore', ([], {'chroma_collection': 'collection'}), '(chroma_collection=collection)\n', (699, 729), False, 'from llama_index.vector_stores import ChromaVectorStore\n'), ((739, 825), 'llama_index.VectorStoreIndex.from_vector_store', 'VectorStoreIndex.from_vector_store', (['vector_store'], {'service_context': 'service_context'}), '(vector_store, service_context=\n service_context)\n', (773, 825), False, 'from llama_index import VectorStoreIndex, SimpleDirectoryReader, ServiceContext\n'), ((520, 585), 'langchain.embeddings.huggingface.HuggingFaceEmbeddings', 'HuggingFaceEmbeddings', ([], {'model_name': '"""ai-forever/sbert_large_nlu_ru"""'}), "(model_name='ai-forever/sbert_large_nlu_ru')\n", (541, 585), False, 'from langchain.embeddings.huggingface import HuggingFaceEmbeddings\n')] |
import os
import requests
from dotenv import load_dotenv
from llama_index import SimpleDirectoryReader, StorageContext, VectorStoreIndex, load_index_from_storage
from pathlib import Path
import helper.data as dhelp
load_dotenv()
LOCAL_FILE_NAME = 'data/TR-61850.pdf'
INDEX_STORAGE_DIR = './storage'
def build_index() -> VectorStoreIndex:
"""
Load data into a vector store and build the index.
:return: The vector store index
"""
# Using the previously identified optimal settings
service_context = dhelp.get_service_context(chunk_size=512, chunk_overlap=75)
try:
print('Loading index from storage...')
# rebuild storage context
storage_context = StorageContext.from_defaults(persist_dir=INDEX_STORAGE_DIR)
# load index
index = load_index_from_storage(
storage_context,
service_context=service_context
)
except Exception as ex:
print(f'Building index from scratch because of exception: {ex}')
        if not os.path.exists(LOCAL_FILE_NAME) or not os.path.isfile(LOCAL_FILE_NAME):
filename = Path(LOCAL_FILE_NAME)
url = 'https://www.fit.vut.cz/research/publication-file/11832/TR-61850.pdf'
response = requests.get(url)
filename.write_bytes(response.content)
documents = SimpleDirectoryReader(
input_files=[LOCAL_FILE_NAME]
).load_data()
index = VectorStoreIndex.from_documents(
documents,
service_context=service_context,
show_progress=True
)
index.storage_context.persist(INDEX_STORAGE_DIR)
return index
if __name__ == '__main__':
build_index()
| [
"llama_index.VectorStoreIndex.from_documents",
"llama_index.load_index_from_storage",
"llama_index.SimpleDirectoryReader",
"llama_index.StorageContext.from_defaults"
] | [((219, 232), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (230, 232), False, 'from dotenv import load_dotenv\n'), ((531, 590), 'helper.data.get_service_context', 'dhelp.get_service_context', ([], {'chunk_size': '(512)', 'chunk_overlap': '(75)'}), '(chunk_size=512, chunk_overlap=75)\n', (556, 590), True, 'import helper.data as dhelp\n'), ((708, 767), 'llama_index.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {'persist_dir': 'INDEX_STORAGE_DIR'}), '(persist_dir=INDEX_STORAGE_DIR)\n', (736, 767), False, 'from llama_index import SimpleDirectoryReader, StorageContext, VectorStoreIndex, load_index_from_storage\n'), ((805, 878), 'llama_index.load_index_from_storage', 'load_index_from_storage', (['storage_context'], {'service_context': 'service_context'}), '(storage_context, service_context=service_context)\n', (828, 878), False, 'from llama_index import SimpleDirectoryReader, StorageContext, VectorStoreIndex, load_index_from_storage\n'), ((1447, 1546), 'llama_index.VectorStoreIndex.from_documents', 'VectorStoreIndex.from_documents', (['documents'], {'service_context': 'service_context', 'show_progress': '(True)'}), '(documents, service_context=service_context,\n show_progress=True)\n', (1478, 1546), False, 'from llama_index import SimpleDirectoryReader, StorageContext, VectorStoreIndex, load_index_from_storage\n'), ((1065, 1096), 'os.path.isfile', 'os.path.isfile', (['LOCAL_FILE_NAME'], {}), '(LOCAL_FILE_NAME)\n', (1079, 1096), False, 'import os\n'), ((1121, 1142), 'pathlib.Path', 'Path', (['LOCAL_FILE_NAME'], {}), '(LOCAL_FILE_NAME)\n', (1125, 1142), False, 'from pathlib import Path\n'), ((1254, 1271), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (1266, 1271), False, 'import requests\n'), ((1030, 1061), 'os.path.exists', 'os.path.exists', (['LOCAL_FILE_NAME'], {}), '(LOCAL_FILE_NAME)\n', (1044, 1061), False, 'import os\n'), ((1344, 1396), 'llama_index.SimpleDirectoryReader', 'SimpleDirectoryReader', ([], {'input_files': '[LOCAL_FILE_NAME]'}), '(input_files=[LOCAL_FILE_NAME])\n', (1365, 1396), False, 'from llama_index import SimpleDirectoryReader, StorageContext, VectorStoreIndex, load_index_from_storage\n')] |
from approaches.index.store.cosmos_index_store import CosmosIndexStore
from llama_index import StorageContext
from approaches.index.store.cosmos_doc_store import CosmosDocumentStore
from llama_index import load_index_from_storage
import os
import openai
from langchain.chat_models import AzureChatOpenAI
from langchain.embeddings import OpenAIEmbeddings
from llama_index import LangchainEmbedding
from llama_index.vector_stores import QdrantVectorStore
from llama_index import (
LLMPredictor,
ServiceContext
)
from llama_index.node_parser import SimpleNodeParser
from llama_index.langchain_helpers.text_splitter import TokenTextSplitter
from llama_index import SimpleDirectoryReader, Document
from llama_index.indices.knowledge_graph.base import GPTKnowledgeGraphIndex
import qdrant_client
from dotenv import load_dotenv
load_dotenv()
AZURE_INDEX_STORAGE_CONNECTION_STRING = os.environ.get("AZURE_INDEX_STORAGE_CONNECTION_STRING") or None
AZURE_QDRANT_HOST = os.environ.get("AZURE_QDRANT_HOST") or None
AZURE_OPENAI_API_BASE = os.environ.get("AZURE_OPENAI_BASE")
AZURE_OPENAI_API_KEY = os.environ.get("AZURE_OPENAI_API_KEY_SOUTH_CENTRAL_US")
AZURE_OPENAI_CHATGPT_DEPLOYMENT = os.environ.get("AZURE_OPENAI_CHATGPT_DEPLOYMENT")
openai.api_type = "azure"
openai.api_base = AZURE_OPENAI_API_BASE
openai.api_version = "2023-03-15-preview"
os.environ["OPENAI_API_KEY"] = str(AZURE_OPENAI_API_KEY)
openai.api_key = AZURE_OPENAI_API_KEY
class GPTKGIndexer:
def __init__(self):
if AZURE_INDEX_STORAGE_CONNECTION_STRING is None or AZURE_QDRANT_HOST is None:
return
self._connection_string = AZURE_INDEX_STORAGE_CONNECTION_STRING
self._index_store = CosmosIndexStore.from_uri(uri=str(self._connection_string), db_name="kg_index")
self._doc_store = CosmosDocumentStore.from_uri(uri=str(self._connection_string), db_name = "doc_store")
self._storage_context = StorageContext.from_defaults(
docstore=self._doc_store,
index_store=self._index_store)
self._llm = AzureChatOpenAI(deployment_name=str(AZURE_OPENAI_CHATGPT_DEPLOYMENT),
openai_api_key=openai.api_key,
openai_api_base=openai.api_base,
openai_api_type=openai.api_type,
openai_api_version=openai.api_version,
temperature=0.0
)
llm_predictor = LLMPredictor(llm=self._llm)
self._embedding_llm = LangchainEmbedding(
OpenAIEmbeddings(
model="text-embedding-ada-002",
deployment="text-embedding-ada-002",
openai_api_key= openai.api_key,
openai_api_base=openai.api_base,
openai_api_type=openai.api_type,
openai_api_version=openai.api_version,
),
embed_batch_size=1,
)
self._service_context = ServiceContext.from_defaults(llm_predictor=llm_predictor, chunk_size_limit=1024)
try:
print("Loading index from storage")
self.index = load_index_from_storage(storage_context=self._storage_context, service_context = self._service_context)
print("Index loaded from storage")
except:
print("Initializing new index")
self.index = self._init_index()
print("Initialized new index")
def add_document(self, fileContent: str):
text_splitter = TokenTextSplitter(separator=" ", chunk_size=2048, chunk_overlap=20)
text_chunks = text_splitter.split_text(fileContent)
doc_chunks = [Document(t) for t in text_chunks]
for doc_chunk in doc_chunks:
self.index.insert(doc_chunk)
def query(self, question: str):
query_engine = self.index.as_query_engine(
include_text=False,
response_mode="tree_summarize"
)
response = query_engine.query(question)
return response
def _init_index(self):
        # Build an empty knowledge graph index and return it; without the return
        # value, "self.index = self._init_index()" in __init__ would overwrite
        # self.index with None.
        self.index = GPTKnowledgeGraphIndex(
            [],
            service_context=self._service_context,
            storage_context=self._storage_context
        )
        return self.index | [
"llama_index.indices.knowledge_graph.base.GPTKnowledgeGraphIndex",
"llama_index.langchain_helpers.text_splitter.TokenTextSplitter",
"llama_index.LLMPredictor",
"llama_index.ServiceContext.from_defaults",
"llama_index.StorageContext.from_defaults",
"llama_index.load_index_from_storage",
"llama_index.Document"
] | [((832, 845), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (843, 845), False, 'from dotenv import load_dotenv\n'), ((1039, 1074), 'os.environ.get', 'os.environ.get', (['"""AZURE_OPENAI_BASE"""'], {}), "('AZURE_OPENAI_BASE')\n", (1053, 1074), False, 'import os\n'), ((1098, 1153), 'os.environ.get', 'os.environ.get', (['"""AZURE_OPENAI_API_KEY_SOUTH_CENTRAL_US"""'], {}), "('AZURE_OPENAI_API_KEY_SOUTH_CENTRAL_US')\n", (1112, 1153), False, 'import os\n'), ((1188, 1237), 'os.environ.get', 'os.environ.get', (['"""AZURE_OPENAI_CHATGPT_DEPLOYMENT"""'], {}), "('AZURE_OPENAI_CHATGPT_DEPLOYMENT')\n", (1202, 1237), False, 'import os\n'), ((887, 942), 'os.environ.get', 'os.environ.get', (['"""AZURE_INDEX_STORAGE_CONNECTION_STRING"""'], {}), "('AZURE_INDEX_STORAGE_CONNECTION_STRING')\n", (901, 942), False, 'import os\n'), ((971, 1006), 'os.environ.get', 'os.environ.get', (['"""AZURE_QDRANT_HOST"""'], {}), "('AZURE_QDRANT_HOST')\n", (985, 1006), False, 'import os\n'), ((1918, 2008), 'llama_index.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {'docstore': 'self._doc_store', 'index_store': 'self._index_store'}), '(docstore=self._doc_store, index_store=self.\n _index_store)\n', (1946, 2008), False, 'from llama_index import StorageContext\n'), ((2366, 2393), 'llama_index.LLMPredictor', 'LLMPredictor', ([], {'llm': 'self._llm'}), '(llm=self._llm)\n', (2378, 2393), False, 'from llama_index import LLMPredictor, ServiceContext\n'), ((2866, 2951), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'llm_predictor': 'llm_predictor', 'chunk_size_limit': '(1024)'}), '(llm_predictor=llm_predictor, chunk_size_limit=1024\n )\n', (2894, 2951), False, 'from llama_index import LLMPredictor, ServiceContext\n'), ((3411, 3478), 'llama_index.langchain_helpers.text_splitter.TokenTextSplitter', 'TokenTextSplitter', ([], {'separator': '""" """', 'chunk_size': '(2048)', 'chunk_overlap': '(20)'}), "(separator=' ', chunk_size=2048, chunk_overlap=20)\n", (3428, 3478), False, 'from llama_index.langchain_helpers.text_splitter import TokenTextSplitter\n'), ((3968, 4076), 'llama_index.indices.knowledge_graph.base.GPTKnowledgeGraphIndex', 'GPTKnowledgeGraphIndex', (['[]'], {'service_context': 'self._service_context', 'storage_context': 'self._storage_context'}), '([], service_context=self._service_context,\n storage_context=self._storage_context)\n', (3990, 4076), False, 'from llama_index.indices.knowledge_graph.base import GPTKnowledgeGraphIndex\n'), ((2457, 2691), 'langchain.embeddings.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {'model': '"""text-embedding-ada-002"""', 'deployment': '"""text-embedding-ada-002"""', 'openai_api_key': 'openai.api_key', 'openai_api_base': 'openai.api_base', 'openai_api_type': 'openai.api_type', 'openai_api_version': 'openai.api_version'}), "(model='text-embedding-ada-002', deployment=\n 'text-embedding-ada-002', openai_api_key=openai.api_key,\n openai_api_base=openai.api_base, openai_api_type=openai.api_type,\n openai_api_version=openai.api_version)\n", (2473, 2691), False, 'from langchain.embeddings import OpenAIEmbeddings\n'), ((3033, 3138), 'llama_index.load_index_from_storage', 'load_index_from_storage', ([], {'storage_context': 'self._storage_context', 'service_context': 'self._service_context'}), '(storage_context=self._storage_context,\n service_context=self._service_context)\n', (3056, 3138), False, 'from llama_index import load_index_from_storage\n'), ((3561, 3572), 'llama_index.Document', 'Document', (['t'], {}), '(t)\n', (3569, 
3572), False, 'from llama_index import SimpleDirectoryReader, Document\n')] |
"""
Chat with transcripts stored by Llamaindex in Weaviate Cloud
"""
import openai
import streamlit as st
import weaviate
from llama_index import ServiceContext
from llama_index import VectorStoreIndex
from llama_index.llms import OpenAI
from llama_index.vector_stores import WeaviateVectorStore
st.set_page_config(
page_title="Chat with my Youtube Channel",
page_icon="🦙",
layout="centered",
initial_sidebar_state="auto",
menu_items=None,
)
openai.api_key = st.secrets["OPENAI_API_KEY"]
weaviate_index = "LlamaIndex"
def init_session_state():
if "messages" not in st.session_state.keys():
st.session_state["messages"] = [
{"role": "assistant", "content": "Ask me a question"}
]
@st.cache_resource(show_spinner=False)
def load_weaviate_client():
client = weaviate.Client(
url=st.secrets["WEAVIATE_URL"],
auth_client_secret=weaviate.AuthApiKey(api_key=st.secrets["WEAVIATE_API_KEY"]),
additional_headers={"X-OpenAI-Api-Key": st.secrets["OPENAI_API_KEY"]},
)
return client
@st.cache_resource(show_spinner=False)
def load_weaviate_llamaindex(index_name: str):
weaviate_client = load_weaviate_client()
if not weaviate_client.schema.exists(index_name):
st.error(
f"Index / Weaviate class {index_name} does not exist. Please build it beforehand"
)
st.stop()
vector_store = WeaviateVectorStore(
weaviate_client=weaviate_client, index_name=index_name
)
service_context = ServiceContext.from_defaults(
llm=OpenAI(
model="gpt-3.5-turbo",
temperature=0.2,
system_prompt="You are an expert on the Streamlit Python library. Your job is to answer technical questions. Assume that all questions are related to the Streamlit Python library. Keep your answers technical and based on facts. Do not hallucinate features.",
)
)
loaded_index = VectorStoreIndex.from_vector_store(
vector_store, service_context=service_context
)
return loaded_index
def display_chat_history():
for message in st.session_state["messages"]:
with st.chat_message(message["role"]):
st.write(message["content"])
st.header("Chat with my Youtube Channel")
init_session_state()
index = load_weaviate_llamaindex(weaviate_index)
chat_engine = index.as_chat_engine(
chat_mode="condense_question", verbose=True
)
# Prompt for user input and save to chat history
if prompt := st.chat_input("Your question"):
st.session_state.messages.append(
{"role": "user", "content": prompt}
)
# Display the prior chat messages
display_chat_history()
# If last message is not from assistant, generate a new response
if st.session_state.messages[-1]["role"] != "assistant":
with st.chat_message("assistant"):
with st.spinner("Thinking..."):
response = chat_engine.chat(prompt)
st.write(response.response)
message = {
"role": "assistant",
"content": response.response
}
st.session_state.messages.append(message) # Add response to message history
| [
"llama_index.VectorStoreIndex.from_vector_store",
"llama_index.llms.OpenAI",
"llama_index.vector_stores.WeaviateVectorStore"
] | [((297, 443), 'streamlit.set_page_config', 'st.set_page_config', ([], {'page_title': '"""Chat with my Youtube Channel"""', 'page_icon': '"""🦙"""', 'layout': '"""centered"""', 'initial_sidebar_state': '"""auto"""', 'menu_items': 'None'}), "(page_title='Chat with my Youtube Channel', page_icon='🦙',\n layout='centered', initial_sidebar_state='auto', menu_items=None)\n", (315, 443), True, 'import streamlit as st\n'), ((739, 776), 'streamlit.cache_resource', 'st.cache_resource', ([], {'show_spinner': '(False)'}), '(show_spinner=False)\n', (756, 776), True, 'import streamlit as st\n'), ((1069, 1106), 'streamlit.cache_resource', 'st.cache_resource', ([], {'show_spinner': '(False)'}), '(show_spinner=False)\n', (1086, 1106), True, 'import streamlit as st\n'), ((2235, 2276), 'streamlit.header', 'st.header', (['"""Chat with my Youtube Channel"""'], {}), "('Chat with my Youtube Channel')\n", (2244, 2276), True, 'import streamlit as st\n'), ((1414, 1489), 'llama_index.vector_stores.WeaviateVectorStore', 'WeaviateVectorStore', ([], {'weaviate_client': 'weaviate_client', 'index_name': 'index_name'}), '(weaviate_client=weaviate_client, index_name=index_name)\n', (1433, 1489), False, 'from llama_index.vector_stores import WeaviateVectorStore\n'), ((1946, 2032), 'llama_index.VectorStoreIndex.from_vector_store', 'VectorStoreIndex.from_vector_store', (['vector_store'], {'service_context': 'service_context'}), '(vector_store, service_context=\n service_context)\n', (1980, 2032), False, 'from llama_index import VectorStoreIndex\n'), ((2498, 2528), 'streamlit.chat_input', 'st.chat_input', (['"""Your question"""'], {}), "('Your question')\n", (2511, 2528), True, 'import streamlit as st\n'), ((2534, 2603), 'streamlit.session_state.messages.append', 'st.session_state.messages.append', (["{'role': 'user', 'content': prompt}"], {}), "({'role': 'user', 'content': prompt})\n", (2566, 2603), True, 'import streamlit as st\n'), ((594, 617), 'streamlit.session_state.keys', 'st.session_state.keys', ([], {}), '()\n', (615, 617), True, 'import streamlit as st\n'), ((1262, 1363), 'streamlit.error', 'st.error', (['f"""Index / Weaviate class {index_name} does not exist. Please build it beforehand"""'], {}), "(\n f'Index / Weaviate class {index_name} does not exist. Please build it beforehand'\n )\n", (1270, 1363), True, 'import streamlit as st\n'), ((1384, 1393), 'streamlit.stop', 'st.stop', ([], {}), '()\n', (1391, 1393), True, 'import streamlit as st\n'), ((2804, 2832), 'streamlit.chat_message', 'st.chat_message', (['"""assistant"""'], {}), "('assistant')\n", (2819, 2832), True, 'import streamlit as st\n'), ((902, 961), 'weaviate.AuthApiKey', 'weaviate.AuthApiKey', ([], {'api_key': "st.secrets['WEAVIATE_API_KEY']"}), "(api_key=st.secrets['WEAVIATE_API_KEY'])\n", (921, 961), False, 'import weaviate\n'), ((1568, 1883), 'llama_index.llms.OpenAI', 'OpenAI', ([], {'model': '"""gpt-3.5-turbo"""', 'temperature': '(0.2)', 'system_prompt': '"""You are an expert on the Streamlit Python library. Your job is to answer technical questions. Assume that all questions are related to the Streamlit Python library. Keep your answers technical and based on facts. Do not hallucinate features."""'}), "(model='gpt-3.5-turbo', temperature=0.2, system_prompt=\n 'You are an expert on the Streamlit Python library. Your job is to answer technical questions. Assume that all questions are related to the Streamlit Python library. Keep your answers technical and based on facts. 
Do not hallucinate features.'\n )\n", (1574, 1883), False, 'from llama_index.llms import OpenAI\n'), ((2158, 2190), 'streamlit.chat_message', 'st.chat_message', (["message['role']"], {}), "(message['role'])\n", (2173, 2190), True, 'import streamlit as st\n'), ((2204, 2232), 'streamlit.write', 'st.write', (["message['content']"], {}), "(message['content'])\n", (2212, 2232), True, 'import streamlit as st\n'), ((2847, 2872), 'streamlit.spinner', 'st.spinner', (['"""Thinking..."""'], {}), "('Thinking...')\n", (2857, 2872), True, 'import streamlit as st\n'), ((2934, 2961), 'streamlit.write', 'st.write', (['response.response'], {}), '(response.response)\n', (2942, 2961), True, 'import streamlit as st\n'), ((3095, 3136), 'streamlit.session_state.messages.append', 'st.session_state.messages.append', (['message'], {}), '(message)\n', (3127, 3136), True, 'import streamlit as st\n')] |
# Debug stuff
import os
import readline
print("Current Working Directory:", os.getcwd())
#env_var = os.getenv('OPENAI_API_KEY')
#print(env_var)
# Set up logging and llama-index
import logging
import sys
logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))
from llama_index import VectorStoreIndex, SimpleDirectoryReader
documents = SimpleDirectoryReader("python/data").load_data()
index = VectorStoreIndex.from_documents(documents)
query_engine = index.as_query_engine()
response = query_engine.query("What did the author do growing up?")
print("---------------------")
print(response)
print("---------------------")
| [
"llama_index.VectorStoreIndex.from_documents",
"llama_index.SimpleDirectoryReader"
] | [((191, 250), 'logging.basicConfig', 'logging.basicConfig', ([], {'stream': 'sys.stdout', 'level': 'logging.DEBUG'}), '(stream=sys.stdout, level=logging.DEBUG)\n', (210, 250), False, 'import logging\n'), ((459, 501), 'llama_index.VectorStoreIndex.from_documents', 'VectorStoreIndex.from_documents', (['documents'], {}), '(documents)\n', (490, 501), False, 'from llama_index import VectorStoreIndex, SimpleDirectoryReader\n'), ((76, 87), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (85, 87), False, 'import os\n'), ((282, 322), 'logging.StreamHandler', 'logging.StreamHandler', ([], {'stream': 'sys.stdout'}), '(stream=sys.stdout)\n', (303, 322), False, 'import logging\n'), ((251, 270), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (268, 270), False, 'import logging\n'), ((402, 438), 'llama_index.SimpleDirectoryReader', 'SimpleDirectoryReader', (['"""python/data"""'], {}), "('python/data')\n", (423, 438), False, 'from llama_index import VectorStoreIndex, SimpleDirectoryReader\n')] |
import base64
import nest_asyncio
import streamlit as st
from llama_index import (LLMPredictor, ServiceContext, StorageContext,
load_index_from_storage, set_global_service_context)
from llama_index.llms import LlamaCPP
from llama_index.memory import ChatMemoryBuffer
nest_asyncio.apply()
# https://scontent.fuio13-1.fna.fbcdn.net/v/t1.6435-9/120996839_10158988589315921_8361322757228136300_n.jpg?_nc_cat=102&ccb=1-7&_nc_sid=e3f864&_nc_eui2=AeHUhll5E2tIEGzo7Jg--N8eBIvqh7KHjPwEi-qHsoeM_KW47QBC1Y_5XRVf1xtDOhB6KzK5neZo5z5VTNLJZGFO&_nc_ohc=HZNG6BSyHqcAX8Q68Xm&_nc_ht=scontent.fuio13-1.fna&oh=00_AfDmXURCX1gV_1sK893NnvfD4T_LfXpZlDMa3xqSWZziVw&oe=653279B1
st.set_page_config(page_title="UTPL Chat", page_icon="🤖", layout="wide", )
@st.cache_data
def get_img_as_base64(file):
with open(file, "rb") as f:
data = f.read()
return base64.b64encode(data).decode()
img = get_img_as_base64("utpl.jpeg")
st.markdown(f"""
<style>
.stApp {{
background-color: black;
background-attachment: fixed;
background-size: cover}}
</style>
""", unsafe_allow_html=True)
st.title("UTPL Chat 🤖")
# /Users/becario/Library/Caches/llama_index/models/llama-2-70b-chat.ggmlv3.q4_K_S.bin
llm = LlamaCPP(
model_path="./models/llama-2-13b-chat.Q4_0.gguf",
temperature=0,
model_kwargs={'n_gpu_layers': 10},
max_new_tokens=500,
    # creativity=0  # Adjust this value to your preferences
)
llm_predictor = LLMPredictor(llm=llm)
# Storage and service context
storage_context = StorageContext.from_defaults(persist_dir="./storage")
service_context = ServiceContext.from_defaults(llm_predictor=llm_predictor)
set_global_service_context(service_context=service_context)
# Load the index from storage
index = load_index_from_storage(storage_context, service_context=service_context)
# Initialize the chat memory
if 'memory' not in st.session_state:
st.session_state['memory'] = ChatMemoryBuffer.from_defaults(token_limit=4096)
# Create the chat engine
chat_engine = index.as_chat_engine(
chat_mode='context',
    similarity_top_k=5,
memory=st.session_state['memory'],
# system_prompt="Responde amablemente la pregunta en base al contexto la data sobre las carreras de la Universidad Técnica Particular de Loja (UTPL)",
system_prompt = (
"Por favor, responde amablemente a la pregunta considerando el contexto y la información disponible sobre las carreras de la Universidad Técnica Particular de Loja (UTPL). "
"Puedes proporcionar detalles sobre programas académicos, números de ciclos, descripciones de carreras, perfiles profesionales u otra información relevante que esté exclusivamente en el archivo de datos. "
"Si no tienes información sobre el tema, simplemente indícalo en tu respuesta."
),
)
# similarity_top_k=1
# Initialize messages
if 'messages' not in st.session_state:
st.session_state['messages'] = [{"role": "assistant", "content": "Hola! Qué quieres saber de la UTPL?"}]
# Display previous messages
for message in st.session_state['messages']:
with st.chat_message(message['role']):
st.markdown(message['content'])
# Get the user's question
if prompt := st.chat_input("¿Qué quieres saber sobre la UTPL?"):
st.session_state.messages.append({"role": "user", "content": prompt})
with st.chat_message("user"):
st.markdown(prompt)
is_async = True
with st.chat_message("assistant"):
with st.spinner("Espere un momento..."):
if is_async:
                # Get the chat engine response asynchronously (streaming)
response = chat_engine.stream_chat(prompt)
placeholder = st.empty()
full_response = ''
for item in response.response_gen:
full_response += item
placeholder.markdown(full_response)
placeholder.markdown(full_response)
else:
                # Get the chat engine response synchronously
full_response = chat_engine.chat(prompt)
st.markdown(full_response)
        # Add the response to the message history
st.session_state.messages.append({"role": "assistant", "content": full_response})
| [
"llama_index.memory.ChatMemoryBuffer.from_defaults",
"llama_index.LLMPredictor",
"llama_index.ServiceContext.from_defaults",
"llama_index.llms.LlamaCPP",
"llama_index.StorageContext.from_defaults",
"llama_index.set_global_service_context",
"llama_index.load_index_from_storage"
] | [((293, 313), 'nest_asyncio.apply', 'nest_asyncio.apply', ([], {}), '()\n', (311, 313), False, 'import nest_asyncio\n'), ((679, 751), 'streamlit.set_page_config', 'st.set_page_config', ([], {'page_title': '"""UTPL Chat"""', 'page_icon': '"""🤖"""', 'layout': '"""wide"""'}), "(page_title='UTPL Chat', page_icon='🤖', layout='wide')\n", (697, 751), True, 'import streamlit as st\n'), ((943, 1210), 'streamlit.markdown', 'st.markdown', (['f"""\n <style>\n .stApp {{\n background-color: black;\n background-attachment: fixed;\n background-size: cover}}\n </style>\n """'], {'unsafe_allow_html': '(True)'}), '(\n f"""\n <style>\n .stApp {{\n background-color: black;\n background-attachment: fixed;\n background-size: cover}}\n </style>\n """\n , unsafe_allow_html=True)\n', (954, 1210), True, 'import streamlit as st\n'), ((1203, 1226), 'streamlit.title', 'st.title', (['"""UTPL Chat 🤖"""'], {}), "('UTPL Chat 🤖')\n", (1211, 1226), True, 'import streamlit as st\n'), ((1321, 1453), 'llama_index.llms.LlamaCPP', 'LlamaCPP', ([], {'model_path': '"""./models/llama-2-13b-chat.Q4_0.gguf"""', 'temperature': '(0)', 'model_kwargs': "{'n_gpu_layers': 10}", 'max_new_tokens': '(500)'}), "(model_path='./models/llama-2-13b-chat.Q4_0.gguf', temperature=0,\n model_kwargs={'n_gpu_layers': 10}, max_new_tokens=500)\n", (1329, 1453), False, 'from llama_index.llms import LlamaCPP\n'), ((1554, 1575), 'llama_index.LLMPredictor', 'LLMPredictor', ([], {'llm': 'llm'}), '(llm=llm)\n', (1566, 1575), False, 'from llama_index import LLMPredictor, ServiceContext, StorageContext, load_index_from_storage, set_global_service_context\n'), ((1634, 1687), 'llama_index.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {'persist_dir': '"""./storage"""'}), "(persist_dir='./storage')\n", (1662, 1687), False, 'from llama_index import LLMPredictor, ServiceContext, StorageContext, load_index_from_storage, set_global_service_context\n'), ((1706, 1763), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'llm_predictor': 'llm_predictor'}), '(llm_predictor=llm_predictor)\n', (1734, 1763), False, 'from llama_index import LLMPredictor, ServiceContext, StorageContext, load_index_from_storage, set_global_service_context\n'), ((1764, 1823), 'llama_index.set_global_service_context', 'set_global_service_context', ([], {'service_context': 'service_context'}), '(service_context=service_context)\n', (1790, 1823), False, 'from llama_index import LLMPredictor, ServiceContext, StorageContext, load_index_from_storage, set_global_service_context\n'), ((1872, 1945), 'llama_index.load_index_from_storage', 'load_index_from_storage', (['storage_context'], {'service_context': 'service_context'}), '(storage_context, service_context=service_context)\n', (1895, 1945), False, 'from llama_index import LLMPredictor, ServiceContext, StorageContext, load_index_from_storage, set_global_service_context\n'), ((2050, 2098), 'llama_index.memory.ChatMemoryBuffer.from_defaults', 'ChatMemoryBuffer.from_defaults', ([], {'token_limit': '(4096)'}), '(token_limit=4096)\n', (2080, 2098), False, 'from llama_index.memory import ChatMemoryBuffer\n'), ((3295, 3345), 'streamlit.chat_input', 'st.chat_input', (['"""¿Qué quieres saber sobre la UTPL?"""'], {}), "('¿Qué quieres saber sobre la UTPL?')\n", (3308, 3345), True, 'import streamlit as st\n'), ((3351, 3420), 'streamlit.session_state.messages.append', 'st.session_state.messages.append', (["{'role': 'user', 'content': prompt}"], {}), "({'role': 'user', 'content': prompt})\n", (3383, 3420), True, 
'import streamlit as st\n'), ((3174, 3206), 'streamlit.chat_message', 'st.chat_message', (["message['role']"], {}), "(message['role'])\n", (3189, 3206), True, 'import streamlit as st\n'), ((3216, 3247), 'streamlit.markdown', 'st.markdown', (["message['content']"], {}), "(message['content'])\n", (3227, 3247), True, 'import streamlit as st\n'), ((3431, 3454), 'streamlit.chat_message', 'st.chat_message', (['"""user"""'], {}), "('user')\n", (3446, 3454), True, 'import streamlit as st\n'), ((3465, 3484), 'streamlit.markdown', 'st.markdown', (['prompt'], {}), '(prompt)\n', (3476, 3484), True, 'import streamlit as st\n'), ((3520, 3548), 'streamlit.chat_message', 'st.chat_message', (['"""assistant"""'], {}), "('assistant')\n", (3535, 3548), True, 'import streamlit as st\n'), ((4306, 4391), 'streamlit.session_state.messages.append', 'st.session_state.messages.append', (["{'role': 'assistant', 'content': full_response}"], {}), "({'role': 'assistant', 'content':\n full_response})\n", (4338, 4391), True, 'import streamlit as st\n'), ((872, 894), 'base64.b64encode', 'base64.b64encode', (['data'], {}), '(data)\n', (888, 894), False, 'import base64\n'), ((3564, 3598), 'streamlit.spinner', 'st.spinner', (['"""Espere un momento..."""'], {}), "('Espere un momento...')\n", (3574, 3598), True, 'import streamlit as st\n'), ((3787, 3797), 'streamlit.empty', 'st.empty', ([], {}), '()\n', (3795, 3797), True, 'import streamlit as st\n'), ((4215, 4241), 'streamlit.markdown', 'st.markdown', (['full_response'], {}), '(full_response)\n', (4226, 4241), True, 'import streamlit as st\n')] |
import streamlit as st
import os
import sys
import logging
from llama_index import TreeIndex, SimpleDirectoryReader, StorageContext, get_response_synthesizer, load_index_from_storage, ServiceContext
from llama_index.llms import OpenAI, ChatMessage
from llama_index.query_engine import RetrieverQueryEngine
from llama_index.text_splitter import CodeSplitter
from llama_index.node_parser import SimpleNodeParser
import openai
import json
import nest_asyncio
nest_asyncio.apply()
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
logger = logging.getLogger()
if not logger.handlers:
logger.addHandler(logging.StreamHandler(stream=sys.stdout))
# API key setup
openai.api_key = st.secrets.openai_key
# Get folder path from command-line arguments
folder_path = sys.argv[-1] if len(sys.argv) > 1 else './results'
# Get index name from folder name
index_name = os.path.basename(os.path.normpath(folder_path))
@st.cache_resource(show_spinner=False)
def load_data():
index = None
try:
# Rebuild storage context
storage_context = StorageContext.from_defaults(persist_dir=f'./storage/{index_name}')
# Load index
index = load_index_from_storage(storage_context, index_id=index_name)
logging.info(f"Loaded index: {index}")
return index
except Exception as e:
logging.info(f"Could not load index: {e}\nCreating new index")# with summary query: {SUMMARY_QUERY}")
documents = SimpleDirectoryReader(folder_path, recursive=True).load_data()
service_context = ServiceContext.from_defaults(llm=OpenAI(temperature=0, model="gpt-4"))#, temperature=0.5, system_prompt="You are an expert on the LLama index Python library and your job is to answer technical questions. Assume that all questions are related to the LLama index Python library. Keep your answers technical and based on facts – do not hallucinate features."))
text_splitter = CodeSplitter(
language="python",
chunk_lines=40,
chunk_lines_overlap=15,
max_chars=1500,
)
node_parser = SimpleNodeParser.from_defaults(text_splitter=text_splitter)
service_context = ServiceContext.from_defaults(node_parser=node_parser)
index = TreeIndex.from_documents(documents, service_context=service_context)
index.set_index_id(index_name)
index.storage_context.persist(f"./storage/{index_name}")
logging.info(f"Created index: {index}")
return index
index = load_data()
# Define chat engine
#mode = st.selectbox("Select query mode", ["condense_question", "best", "context", "simple", "react", "openai"])
#chat_engine = index.as_chat_engine(chat_mode=mode, verbose=True)
response_mode = st.selectbox("Select chat mode", ["best", "context", "condense_question", "simple", "react", "openai"])
include_history = st.checkbox("Include chat history", value=True)
# assemble query engine
chat_engine = index.as_chat_engine(chat_mode=response_mode, verbose=True)
st.subheader("Chat with {0} index".format(index_name))
# Initialize session state
if "messages" not in st.session_state.keys():
st.session_state.messages = []
# Chat UI
if prompt := st.chat_input("Your question"):
st.session_state.messages.append({"role": "user", "content": prompt})
# Display chat
for message in st.session_state.messages:
with st.chat_message(message["role"]):
st.write(message["content"])
# Perform and display the query
if len(st.session_state.messages) > 0 and st.session_state.messages[-1]["role"] != "assistant":
with st.chat_message("assistant"):
response = None
with st.spinner("Thinking..."):
logging.info(prompt)
# Initialize an empty string to hold the concatenated tokens
concatenated_response = ""
chat_history = [ChatMessage(content=message["content"], role=message["role"]) for message in st.session_state.messages]
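            # Stream the assistant reply; prior turns are passed only when "Include chat history" is checked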
response = chat_engine.stream_chat(prompt, chat_history=chat_history if include_history else None)
message_placeholder = st.empty()
full_response = ""
for token in response.response_gen:
full_response += token
message_placeholder.write(full_response + "▌")
message_placeholder.write(full_response)
st.session_state.messages.append({"role": "assistant", "content": full_response})
# Button to export chat log
if len(st.session_state.messages) > 0 and st.button('Export Chat Log'):
chat_log_str = json.dumps(st.session_state.messages, indent=4)
st.download_button(
label="Download Chat Log",
data=chat_log_str,
file_name="chat_log.json",
mime="application/json",
) | [
"llama_index.SimpleDirectoryReader",
"llama_index.text_splitter.CodeSplitter",
"llama_index.ServiceContext.from_defaults",
"llama_index.TreeIndex.from_documents",
"llama_index.llms.OpenAI",
"llama_index.StorageContext.from_defaults",
"llama_index.llms.ChatMessage",
"llama_index.node_parser.SimpleNodeParser.from_defaults",
"llama_index.load_index_from_storage"
] | [((457, 477), 'nest_asyncio.apply', 'nest_asyncio.apply', ([], {}), '()\n', (475, 477), False, 'import nest_asyncio\n'), ((478, 536), 'logging.basicConfig', 'logging.basicConfig', ([], {'stream': 'sys.stdout', 'level': 'logging.INFO'}), '(stream=sys.stdout, level=logging.INFO)\n', (497, 536), False, 'import logging\n'), ((547, 566), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (564, 566), False, 'import logging\n'), ((926, 963), 'streamlit.cache_resource', 'st.cache_resource', ([], {'show_spinner': '(False)'}), '(show_spinner=False)\n', (943, 963), True, 'import streamlit as st\n'), ((2585, 2692), 'streamlit.selectbox', 'st.selectbox', (['"""Select chat mode"""', "['best', 'context', 'condense_question', 'simple', 'react', 'openai']"], {}), "('Select chat mode', ['best', 'context', 'condense_question',\n 'simple', 'react', 'openai'])\n", (2597, 2692), True, 'import streamlit as st\n'), ((2708, 2755), 'streamlit.checkbox', 'st.checkbox', (['"""Include chat history"""'], {'value': '(True)'}), "('Include chat history', value=True)\n", (2719, 2755), True, 'import streamlit as st\n'), ((888, 917), 'os.path.normpath', 'os.path.normpath', (['folder_path'], {}), '(folder_path)\n', (904, 917), False, 'import os\n'), ((2960, 2983), 'streamlit.session_state.keys', 'st.session_state.keys', ([], {}), '()\n', (2981, 2983), True, 'import streamlit as st\n'), ((3044, 3074), 'streamlit.chat_input', 'st.chat_input', (['"""Your question"""'], {}), "('Your question')\n", (3057, 3074), True, 'import streamlit as st\n'), ((3080, 3149), 'streamlit.session_state.messages.append', 'st.session_state.messages.append', (["{'role': 'user', 'content': prompt}"], {}), "({'role': 'user', 'content': prompt})\n", (3112, 3149), True, 'import streamlit as st\n'), ((4223, 4251), 'streamlit.button', 'st.button', (['"""Export Chat Log"""'], {}), "('Export Chat Log')\n", (4232, 4251), True, 'import streamlit as st\n'), ((4272, 4319), 'json.dumps', 'json.dumps', (['st.session_state.messages'], {'indent': '(4)'}), '(st.session_state.messages, indent=4)\n', (4282, 4319), False, 'import json\n'), ((4324, 4445), 'streamlit.download_button', 'st.download_button', ([], {'label': '"""Download Chat Log"""', 'data': 'chat_log_str', 'file_name': '"""chat_log.json"""', 'mime': '"""application/json"""'}), "(label='Download Chat Log', data=chat_log_str, file_name=\n 'chat_log.json', mime='application/json')\n", (4342, 4445), True, 'import streamlit as st\n'), ((614, 654), 'logging.StreamHandler', 'logging.StreamHandler', ([], {'stream': 'sys.stdout'}), '(stream=sys.stdout)\n', (635, 654), False, 'import logging\n'), ((1050, 1117), 'llama_index.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {'persist_dir': 'f"""./storage/{index_name}"""'}), "(persist_dir=f'./storage/{index_name}')\n", (1078, 1117), False, 'from llama_index import TreeIndex, SimpleDirectoryReader, StorageContext, get_response_synthesizer, load_index_from_storage, ServiceContext\n'), ((1143, 1204), 'llama_index.load_index_from_storage', 'load_index_from_storage', (['storage_context'], {'index_id': 'index_name'}), '(storage_context, index_id=index_name)\n', (1166, 1204), False, 'from llama_index import TreeIndex, SimpleDirectoryReader, StorageContext, get_response_synthesizer, load_index_from_storage, ServiceContext\n'), ((1207, 1245), 'logging.info', 'logging.info', (['f"""Loaded index: {index}"""'], {}), "(f'Loaded index: {index}')\n", (1219, 1245), False, 'import logging\n'), ((3217, 3249), 'streamlit.chat_message', 'st.chat_message', 
(["message['role']"], {}), "(message['role'])\n", (3232, 3249), True, 'import streamlit as st\n'), ((3259, 3287), 'streamlit.write', 'st.write', (["message['content']"], {}), "(message['content'])\n", (3267, 3287), True, 'import streamlit as st\n'), ((3423, 3451), 'streamlit.chat_message', 'st.chat_message', (['"""assistant"""'], {}), "('assistant')\n", (3438, 3451), True, 'import streamlit as st\n'), ((3878, 3888), 'streamlit.empty', 'st.empty', ([], {}), '()\n', (3886, 3888), True, 'import streamlit as st\n'), ((4070, 4155), 'streamlit.session_state.messages.append', 'st.session_state.messages.append', (["{'role': 'assistant', 'content': full_response}"], {}), "({'role': 'assistant', 'content':\n full_response})\n", (4102, 4155), True, 'import streamlit as st\n'), ((1287, 1352), 'logging.info', 'logging.info', (['f"""Could not load index: {e}\nCreating new index"""'], {}), '(f"""Could not load index: {e}\nCreating new index""")\n', (1299, 1352), False, 'import logging\n'), ((1864, 1955), 'llama_index.text_splitter.CodeSplitter', 'CodeSplitter', ([], {'language': '"""python"""', 'chunk_lines': '(40)', 'chunk_lines_overlap': '(15)', 'max_chars': '(1500)'}), "(language='python', chunk_lines=40, chunk_lines_overlap=15,\n max_chars=1500)\n", (1876, 1955), False, 'from llama_index.text_splitter import CodeSplitter\n'), ((1982, 2041), 'llama_index.node_parser.SimpleNodeParser.from_defaults', 'SimpleNodeParser.from_defaults', ([], {'text_splitter': 'text_splitter'}), '(text_splitter=text_splitter)\n', (2012, 2041), False, 'from llama_index.node_parser import SimpleNodeParser\n'), ((2062, 2115), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'node_parser': 'node_parser'}), '(node_parser=node_parser)\n', (2090, 2115), False, 'from llama_index import TreeIndex, SimpleDirectoryReader, StorageContext, get_response_synthesizer, load_index_from_storage, ServiceContext\n'), ((2127, 2195), 'llama_index.TreeIndex.from_documents', 'TreeIndex.from_documents', (['documents'], {'service_context': 'service_context'}), '(documents, service_context=service_context)\n', (2151, 2195), False, 'from llama_index import TreeIndex, SimpleDirectoryReader, StorageContext, get_response_synthesizer, load_index_from_storage, ServiceContext\n'), ((2290, 2329), 'logging.info', 'logging.info', (['f"""Created index: {index}"""'], {}), "(f'Created index: {index}')\n", (2302, 2329), False, 'import logging\n'), ((3478, 3503), 'streamlit.spinner', 'st.spinner', (['"""Thinking..."""'], {}), "('Thinking...')\n", (3488, 3503), True, 'import streamlit as st\n'), ((3512, 3532), 'logging.info', 'logging.info', (['prompt'], {}), '(prompt)\n', (3524, 3532), False, 'import logging\n'), ((3647, 3708), 'llama_index.llms.ChatMessage', 'ChatMessage', ([], {'content': "message['content']", 'role': "message['role']"}), "(content=message['content'], role=message['role'])\n", (3658, 3708), False, 'from llama_index.llms import OpenAI, ChatMessage\n'), ((1404, 1454), 'llama_index.SimpleDirectoryReader', 'SimpleDirectoryReader', (['folder_path'], {'recursive': '(True)'}), '(folder_path, recursive=True)\n', (1425, 1454), False, 'from llama_index import TreeIndex, SimpleDirectoryReader, StorageContext, get_response_synthesizer, load_index_from_storage, ServiceContext\n'), ((1520, 1556), 'llama_index.llms.OpenAI', 'OpenAI', ([], {'temperature': '(0)', 'model': '"""gpt-4"""'}), "(temperature=0, model='gpt-4')\n", (1526, 1556), False, 'from llama_index.llms import OpenAI, ChatMessage\n')] |
import sounddevice as sd
import wavio
import whisper
from llama_index.llms import LlamaCPP
from llama_index.llms.base import ChatMessage
import streamlit as st
# Function to record audio
def record_audio(output_filename, duration, sample_rate):
st.write("Recording...")
audio_data = sd.rec(int(duration * sample_rate),
samplerate=sample_rate, channels=1)
sd.wait() # Wait until recording is finished
st.write("Recording finished.")
# Save the recorded audio to a WAV file
wavio.write(output_filename, audio_data, sample_rate, sampwidth=2)
# Function to transcribe audio
def transcribe_audio(audio_file):
model = whisper.load_model('small')
text = model.transcribe(audio_file)
return text['text']
# Function to check grammar and format
def check_grammar_and_format(text):
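    # Path to the local GGML weights loaded by LlamaCPP; adjust to wherever the model file lives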
path = r'llama-2-13b.ggmlv3.q4_0.bin'
llm_gpt = LlamaCPP(model_path=path)
message = ChatMessage(role='user', content=f'check grammar for the following: {text}')
response = llm_gpt.chat([message])
return response.message.content
def main():
st.title("Speech-to-Text and Grammar Checking")
st.subheader('How was your day?')
recording_duration = st.sidebar.slider("Recording Duration (seconds)", 1, 60, 10)
sample_rate = 44100
# Button to start recording
if st.button("Start Recording"):
output_file = "recorded_audio.wav"
record_audio(output_file, recording_duration, sample_rate)
st.write("Audio saved as:", output_file)
        if sd.query_devices(None, 'input')['default_samplerate'] != sample_rate:
            st.warning(f"Warning: The sample rate of the input device is not set to {sample_rate}")
# Use a spinner for transcription
with st.spinner("Transcribing..."):
transcribed_text = transcribe_audio(output_file)
st.subheader("Transcribed Text:")
st.write(transcribed_text)
# Use a spinner for grammar check
with st.spinner("Checking Grammar..."):
grammar_check_result = check_grammar_and_format(transcribed_text)
st.subheader("Grammar Check Result:")
st.write(grammar_check_result)
# Button to upload audio file
uploaded_file = st.file_uploader("Upload an audio file", type=['wav', 'mp3', 'ogg'])
if uploaded_file is not None:
with open("uploaded_audio.wav", "wb") as f:
f.write(uploaded_file.getbuffer())
# Use a spinner for transcription
with st.spinner("Transcribing uploaded audio..."):
transcribed_text = transcribe_audio("uploaded_audio.wav")
st.subheader("Transcribed Text:")
st.write(transcribed_text)
# Use a spinner for grammar check
with st.spinner("Checking Grammar..."):
grammar_check_result = check_grammar_and_format(transcribed_text)
st.subheader("Grammar Check Result:")
st.write(grammar_check_result)
if __name__ == "__main__":
main()
| [
"llama_index.llms.LlamaCPP",
"llama_index.llms.base.ChatMessage"
] | [((259, 283), 'streamlit.write', 'st.write', (['"""Recording..."""'], {}), "('Recording...')\n", (267, 283), True, 'import streamlit as st\n'), ((404, 413), 'sounddevice.wait', 'sd.wait', ([], {}), '()\n', (411, 413), True, 'import sounddevice as sd\n'), ((455, 486), 'streamlit.write', 'st.write', (['"""Recording finished."""'], {}), "('Recording finished.')\n", (463, 486), True, 'import streamlit as st\n'), ((539, 605), 'wavio.write', 'wavio.write', (['output_filename', 'audio_data', 'sample_rate'], {'sampwidth': '(2)'}), '(output_filename, audio_data, sample_rate, sampwidth=2)\n', (550, 605), False, 'import wavio\n'), ((688, 715), 'whisper.load_model', 'whisper.load_model', (['"""small"""'], {}), "('small')\n", (706, 715), False, 'import whisper\n'), ((919, 944), 'llama_index.llms.LlamaCPP', 'LlamaCPP', ([], {'model_path': 'path'}), '(model_path=path)\n', (927, 944), False, 'from llama_index.llms import LlamaCPP\n'), ((960, 1036), 'llama_index.llms.base.ChatMessage', 'ChatMessage', ([], {'role': '"""user"""', 'content': 'f"""check grammar for the following: {text}"""'}), "(role='user', content=f'check grammar for the following: {text}')\n", (971, 1036), False, 'from llama_index.llms.base import ChatMessage\n'), ((1134, 1181), 'streamlit.title', 'st.title', (['"""Speech-to-Text and Grammar Checking"""'], {}), "('Speech-to-Text and Grammar Checking')\n", (1142, 1181), True, 'import streamlit as st\n'), ((1187, 1220), 'streamlit.subheader', 'st.subheader', (['"""How was your day?"""'], {}), "('How was your day?')\n", (1199, 1220), True, 'import streamlit as st\n'), ((1249, 1309), 'streamlit.sidebar.slider', 'st.sidebar.slider', (['"""Recording Duration (seconds)"""', '(1)', '(60)', '(10)'], {}), "('Recording Duration (seconds)', 1, 60, 10)\n", (1266, 1309), True, 'import streamlit as st\n'), ((1378, 1406), 'streamlit.button', 'st.button', (['"""Start Recording"""'], {}), "('Start Recording')\n", (1387, 1406), True, 'import streamlit as st\n'), ((2322, 2390), 'streamlit.file_uploader', 'st.file_uploader', (['"""Upload an audio file"""'], {'type': "['wav', 'mp3', 'ogg']"}), "('Upload an audio file', type=['wav', 'mp3', 'ogg'])\n", (2338, 2390), True, 'import streamlit as st\n'), ((1529, 1569), 'streamlit.write', 'st.write', (['"""Audio saved as:"""', 'output_file'], {}), "('Audio saved as:', output_file)\n", (1537, 1569), True, 'import streamlit as st\n'), ((1671, 1760), 'streamlit.warning', 'st.warning', (['"""Warning: The sample rate of the input device is not set to"""', 'sample_rate'], {}), "('Warning: The sample rate of the input device is not set to',\n sample_rate)\n", (1681, 1760), True, 'import streamlit as st\n'), ((1816, 1845), 'streamlit.spinner', 'st.spinner', (['"""Transcribing..."""'], {}), "('Transcribing...')\n", (1826, 1845), True, 'import streamlit as st\n'), ((1922, 1955), 'streamlit.subheader', 'st.subheader', (['"""Transcribed Text:"""'], {}), "('Transcribed Text:')\n", (1934, 1955), True, 'import streamlit as st\n'), ((1969, 1995), 'streamlit.write', 'st.write', (['transcribed_text'], {}), '(transcribed_text)\n', (1977, 1995), True, 'import streamlit as st\n'), ((2055, 2088), 'streamlit.spinner', 'st.spinner', (['"""Checking Grammar..."""'], {}), "('Checking Grammar...')\n", (2065, 2088), True, 'import streamlit as st\n'), ((2182, 2219), 'streamlit.subheader', 'st.subheader', (['"""Grammar Check Result:"""'], {}), "('Grammar Check Result:')\n", (2194, 2219), True, 'import streamlit as st\n'), ((2233, 2263), 'streamlit.write', 'st.write', (['grammar_check_result'], 
{}), '(grammar_check_result)\n', (2241, 2263), True, 'import streamlit as st\n'), ((2584, 2628), 'streamlit.spinner', 'st.spinner', (['"""Transcribing uploaded audio..."""'], {}), "('Transcribing uploaded audio...')\n", (2594, 2628), True, 'import streamlit as st\n'), ((2714, 2747), 'streamlit.subheader', 'st.subheader', (['"""Transcribed Text:"""'], {}), "('Transcribed Text:')\n", (2726, 2747), True, 'import streamlit as st\n'), ((2761, 2787), 'streamlit.write', 'st.write', (['transcribed_text'], {}), '(transcribed_text)\n', (2769, 2787), True, 'import streamlit as st\n'), ((2843, 2876), 'streamlit.spinner', 'st.spinner', (['"""Checking Grammar..."""'], {}), "('Checking Grammar...')\n", (2853, 2876), True, 'import streamlit as st\n'), ((2970, 3007), 'streamlit.subheader', 'st.subheader', (['"""Grammar Check Result:"""'], {}), "('Grammar Check Result:')\n", (2982, 3007), True, 'import streamlit as st\n'), ((3021, 3051), 'streamlit.write', 'st.write', (['grammar_check_result'], {}), '(grammar_check_result)\n', (3029, 3051), True, 'import streamlit as st\n'), ((1588, 1619), 'sounddevice.query_devices', 'sd.query_devices', (['None', '"""input"""'], {}), "(None, 'input')\n", (1604, 1619), True, 'import sounddevice as sd\n')] |
import os
import sys
import shutil
import openai
from dotenv import load_dotenv
load_dotenv()
openai.api_key = os.getenv("OPENAI_API_KEY")
os.environ["OPENAI_API_KEY"] = os.getenv("OPENAI_API_KEY")
import langchain
from llama_index import StorageContext, load_index_from_storage
from llama_index import (
VectorStoreIndex,
SimpleDirectoryReader,
get_response_synthesizer,
)
from llama_index.retrievers import VectorIndexRetriever
from llama_index.query_engine import RetrieverQueryEngine
from llama_index.indices.postprocessor import SimilarityPostprocessor
def init(resources):
db = resources['db']
dir = os.path.dirname(__file__)
parentDir = os.path.dirname(dir)
textS3Bucket = 'lawlet-uscode'
vectorS3Bucket = 'lawlet-uscode-vectors'
indexS3Bucket = 'lawlet-uscode-index'
localVectorDir = db
localSQLDumpDir = dir +'/'+'uscode'
vectorDownloadDir = dir +'/'+'vectordownload'
targzFile = localVectorDir + '/uscode.tar.gz'
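    # First run only: extract uscode.tar.gz from the vector directory into this module's folder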
if not os.path.exists(localSQLDumpDir):
os.makedirs(localSQLDumpDir)
os.system('tar -xvzf ' + targzFile + ' -C ' + dir)
#if not os.path.exists(localVectorDir):
# os.makedirs(localVectorDir)
# os.system('copy ' + vectorDownloadDir + '/docstore.json ' + localVectorDir + '/docstore.json')
# os.system('copy ' + vectorDownloadDir + '/vector_store.json ' + localVectorDir + '/vector_store.json')
# os.system('copy ' + vectorDownloadDir + '/index_store.json ' + localVectorDir + '/index_store.bin')
# os.system('copy ' + vectorDownloadDir + '/graph_store.json ' + localVectorDir + '/graph_store.json')
documents = SimpleDirectoryReader(localSQLDumpDir).load_data()
print("Rebuild StorageContext")
storage_context = StorageContext.from_defaults(persist_dir=localVectorDir)
print("Load index")
index = load_index_from_storage(storage_context)
# build index
#index = VectorStoreIndex.from_documents(documents)
# configure retriever
retriever = VectorIndexRetriever(
index=index,
similarity_top_k=1,
)
# configure response synthesizer
response_synthesizer = get_response_synthesizer()
# assemble query engine
query_engine = RetrieverQueryEngine(
retriever=retriever,
response_synthesizer=response_synthesizer,
node_postprocessors=[
SimilarityPostprocessor(similarity_cutoff=0.0)
]
)
return query_engine
def query(query_engine, inputText):
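    # Prepend an instruction so the model summarizes the retrieved statute rather than answering the question directly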
queryText = "ignore the question and summarize the retrieved law. Question: " + inputText
response = query_engine.query(queryText)
responseText = response.response
metadata = response.metadata
metadataKeys = list(metadata.keys())
node = metadataKeys[0]
filename = metadata[node]['filename']
s3filename = metadata[node]['S3filename']
title = metadata[node]['title']
number = metadata[node]['number']
name = metadata[node]['name']
source_nodes = response.source_nodes
source_node = source_nodes[0]
score = source_node.score
data = None
if os.path.exists(filename):
data = open(filename, 'r').read()
#else:
# print("Downloading " + filename + " from S3")
# command = 's3cmd get ' + s3filename + ' ' + filename
# print(command)
# os.system(command)
# data = open( filename, 'r').read()
if data == None:
print("Error: data is None")
return None
results = {
"filename": filename,
"s3filename": s3filename,
"name": name,
"title": title,
"nr": number,
"score": score,
"summary": responseText,
"body": data
}
return results
if __name__ == '__main__':
question = "What does the law about Time of election say?"
    resources = {'db': './db'}  # placeholder: point 'db' at the folder containing uscode.tar.gz and the persisted index
    queryEngine = init(resources)
queryResult = query(queryEngine, question)
print(queryResult)
| [
"llama_index.SimpleDirectoryReader",
"llama_index.get_response_synthesizer",
"llama_index.retrievers.VectorIndexRetriever",
"llama_index.StorageContext.from_defaults",
"llama_index.indices.postprocessor.SimilarityPostprocessor",
"llama_index.load_index_from_storage"
] | [((80, 93), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (91, 93), False, 'from dotenv import load_dotenv\n'), ((111, 138), 'os.getenv', 'os.getenv', (['"""OPENAI_API_KEY"""'], {}), "('OPENAI_API_KEY')\n", (120, 138), False, 'import os\n'), ((170, 197), 'os.getenv', 'os.getenv', (['"""OPENAI_API_KEY"""'], {}), "('OPENAI_API_KEY')\n", (179, 197), False, 'import os\n'), ((627, 652), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (642, 652), False, 'import os\n'), ((669, 689), 'os.path.dirname', 'os.path.dirname', (['dir'], {}), '(dir)\n', (684, 689), False, 'import os\n'), ((1757, 1813), 'llama_index.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {'persist_dir': 'localVectorDir'}), '(persist_dir=localVectorDir)\n', (1785, 1813), False, 'from llama_index import StorageContext, load_index_from_storage\n'), ((1850, 1890), 'llama_index.load_index_from_storage', 'load_index_from_storage', (['storage_context'], {}), '(storage_context)\n', (1873, 1890), False, 'from llama_index import StorageContext, load_index_from_storage\n'), ((2007, 2060), 'llama_index.retrievers.VectorIndexRetriever', 'VectorIndexRetriever', ([], {'index': 'index', 'similarity_top_k': '(1)'}), '(index=index, similarity_top_k=1)\n', (2027, 2060), False, 'from llama_index.retrievers import VectorIndexRetriever\n'), ((2149, 2175), 'llama_index.get_response_synthesizer', 'get_response_synthesizer', ([], {}), '()\n', (2173, 2175), False, 'from llama_index import VectorStoreIndex, SimpleDirectoryReader, get_response_synthesizer\n'), ((3093, 3117), 'os.path.exists', 'os.path.exists', (['filename'], {}), '(filename)\n', (3107, 3117), False, 'import os\n'), ((987, 1018), 'os.path.exists', 'os.path.exists', (['localSQLDumpDir'], {}), '(localSQLDumpDir)\n', (1001, 1018), False, 'import os\n'), ((1028, 1056), 'os.makedirs', 'os.makedirs', (['localSQLDumpDir'], {}), '(localSQLDumpDir)\n', (1039, 1056), False, 'import os\n'), ((1065, 1115), 'os.system', 'os.system', (["('tar -xvzf ' + targzFile + ' -C ' + dir)"], {}), "('tar -xvzf ' + targzFile + ' -C ' + dir)\n", (1074, 1115), False, 'import os\n'), ((1648, 1686), 'llama_index.SimpleDirectoryReader', 'SimpleDirectoryReader', (['localSQLDumpDir'], {}), '(localSQLDumpDir)\n', (1669, 1686), False, 'from llama_index import VectorStoreIndex, SimpleDirectoryReader, get_response_synthesizer\n'), ((2368, 2414), 'llama_index.indices.postprocessor.SimilarityPostprocessor', 'SimilarityPostprocessor', ([], {'similarity_cutoff': '(0.0)'}), '(similarity_cutoff=0.0)\n', (2391, 2414), False, 'from llama_index.indices.postprocessor import SimilarityPostprocessor\n')] |
import logging
import os
from typing import List
from decouple import config
from langchain import OpenAI
from llama_index import GPTSimpleVectorIndex, LLMPredictor, PromptHelper
from llama_index.response.schema import Response, SourceNode
from pydantic import BaseModel
from data_provider import LlamaDataProvider, LocalDataProvider, S3DataProvider
os.environ["OPENAI_API_KEY"] = config("OPENAI_API_KEY")
max_input_size = 4096
num_output = 2000
max_chunk_overlap = 20
chunk_size_limit = 600
# text-davinci-003 offers the most tokens (around 4k)
llm_predictor = LLMPredictor(
llm=OpenAI(
temperature=0.5, model_name="text-davinci-003", max_tokens=num_output
)
)
prompt_helper = PromptHelper(
max_input_size,
num_output,
max_chunk_overlap,
chunk_size_limit=chunk_size_limit,
)
class LlamaQuestionnaire:
index: GPTSimpleVectorIndex = None
def __init__(self):
self.data_provider: LlamaDataProvider = self.__get_data_provider()
self.index = self.__load_index_from_data_provider()
def ask_gpt(self, question: str):
if self.index is None:
raise Exception("Unable to work with null index")
result = self.index.query(question, response_mode="compact")
try:
return GptResponse.from_response(result)
except Exception:
logging.error(
'Unable to parse result "%s"' % result, exc_info=True
)
raise
def delete_index(self):
self.data_provider.delete_index()
def __get_data_provider(self) -> LlamaDataProvider:
data_provider_config = config(
"DATA_PROVIDER", default="local", cast=str
)
is_s3_provider = (
data_provider_config is not None
and data_provider_config.lower() == "s3"
)
if is_s3_provider:
return S3DataProvider()
return LocalDataProvider()
    # Newer versions still have bugs when counting tokens.
# service_context = ServiceContext.from_defaults(
# llm_predictor=llm_predictor, prompt_helper=prompt_helper)
# index = GPTSimpleVectorIndex.from_documents(documents,
# service_context=service_context)
def __load_index_from_data_provider(self) -> GPTSimpleVectorIndex:
index = self.data_provider.get_index_file()
if self.index is not None:
return index
documents = self.data_provider.get_data_directory()
logging.info("Create index from stored documents")
index = GPTSimpleVectorIndex(
documents,
llm_predictor=llm_predictor,
prompt_helper=prompt_helper,
)
self.data_provider.save_index(index)
return index
class GptSource(BaseModel):
source_text: str
doc_id: str
similarity: float
@staticmethod
def from_source_node(source_node: SourceNode):
return GptSource(
source_text=source_node.source_text,
doc_id=source_node.doc_id,
similarity=source_node.similarity,
)
class GptResponse(BaseModel):
summary: str
sources: List[GptSource]
@staticmethod
def from_response(response: Response):
sources: List[GptSource] = []
for element in response.source_nodes:
sources.append(GptSource.from_source_node(element))
return GptResponse(summary=response.response, sources=sources)
| [
"llama_index.GPTSimpleVectorIndex",
"llama_index.PromptHelper"
] | [((384, 408), 'decouple.config', 'config', (['"""OPENAI_API_KEY"""'], {}), "('OPENAI_API_KEY')\n", (390, 408), False, 'from decouple import config\n'), ((699, 797), 'llama_index.PromptHelper', 'PromptHelper', (['max_input_size', 'num_output', 'max_chunk_overlap'], {'chunk_size_limit': 'chunk_size_limit'}), '(max_input_size, num_output, max_chunk_overlap,\n chunk_size_limit=chunk_size_limit)\n', (711, 797), False, 'from llama_index import GPTSimpleVectorIndex, LLMPredictor, PromptHelper\n'), ((589, 666), 'langchain.OpenAI', 'OpenAI', ([], {'temperature': '(0.5)', 'model_name': '"""text-davinci-003"""', 'max_tokens': 'num_output'}), "(temperature=0.5, model_name='text-davinci-003', max_tokens=num_output)\n", (595, 666), False, 'from langchain import OpenAI\n'), ((1622, 1672), 'decouple.config', 'config', (['"""DATA_PROVIDER"""'], {'default': '"""local"""', 'cast': 'str'}), "('DATA_PROVIDER', default='local', cast=str)\n", (1628, 1672), False, 'from decouple import config\n'), ((1911, 1930), 'data_provider.LocalDataProvider', 'LocalDataProvider', ([], {}), '()\n', (1928, 1930), False, 'from data_provider import LlamaDataProvider, LocalDataProvider, S3DataProvider\n'), ((2467, 2517), 'logging.info', 'logging.info', (['"""Create index from stored documents"""'], {}), "('Create index from stored documents')\n", (2479, 2517), False, 'import logging\n'), ((2534, 2628), 'llama_index.GPTSimpleVectorIndex', 'GPTSimpleVectorIndex', (['documents'], {'llm_predictor': 'llm_predictor', 'prompt_helper': 'prompt_helper'}), '(documents, llm_predictor=llm_predictor, prompt_helper=\n prompt_helper)\n', (2554, 2628), False, 'from llama_index import GPTSimpleVectorIndex, LLMPredictor, PromptHelper\n'), ((1878, 1894), 'data_provider.S3DataProvider', 'S3DataProvider', ([], {}), '()\n', (1892, 1894), False, 'from data_provider import LlamaDataProvider, LocalDataProvider, S3DataProvider\n'), ((1346, 1414), 'logging.error', 'logging.error', (['(\'Unable to parse result "%s"\' % result)'], {'exc_info': '(True)'}), '(\'Unable to parse result "%s"\' % result, exc_info=True)\n', (1359, 1414), False, 'import logging\n')] |
import os
import json
from urllib.parse import urlparse
import streamlit as st
from scrape_utils import scrape
from llama_index import download_loader, GPTSimpleVectorIndex, SimpleDirectoryReader, ServiceContext, LLMPredictor, PromptHelper
from llama_index.llm_predictor.chatgpt import ChatGPTLLMPredictor
from llama_index.node_parser import SimpleNodeParser
from langchain import OpenAI
from langchain.agents import Tool
from langchain.agents import initialize_agent
from langchain.chains.conversation.memory import ConversationBufferMemory
from llama_index.langchain_helpers.memory_wrapper import GPTIndexChatMemory
index_name = "./index.json"
documents_folder = "./documents"
def get_index_name(input_urls):
basepoints = input_urls.split('\n')
index_name = ""
for basepoint in basepoints:
index_name += urlparse(basepoint).netloc.split(".")[0] + "&"
return index_name
def load_documents_to_gpt_vectorstore(input_urls):
index_name = get_index_name(input_urls)
if os.path.exists("../index/"+index_name+".json"):
print("index found")
return GPTSimpleVectorIndex.load_from_disk("../index/"+index_name+".json")
else:
print("building new index")
basepoints = input_urls.split('\n')
endpoints = scrape(basepoints)
BeautifulSoupWebReader = download_loader("BeautifulSoupWebReader")
loader = BeautifulSoupWebReader()
documents = loader.load_data(endpoints)
parser = SimpleNodeParser()
nodes = parser.get_nodes_from_documents(documents)
llm_predictor = LLMPredictor(
llm=OpenAI(temperature=0, model_name="text-davinci-003")
)
max_input_size = 4096
num_output = 2048
max_chunk_overlap = 20
prompt_helper = PromptHelper(max_input_size, num_output, max_chunk_overlap)
service_context = ServiceContext.from_defaults(llm_predictor=llm_predictor, prompt_helper=prompt_helper)
index = GPTSimpleVectorIndex(nodes, service_context=service_context)
index.save_to_disk("../index/"+index_name+".json")
return index
def build_conversation(urls):
index_name = get_index_name(urls)
if not os.path.exists("../index/"+index_name+".json"):
return "index not found, click on 'load documents' first"
else: print("index found")
index = GPTSimpleVectorIndex.load_from_disk("../index/"+index_name+".json")
tools = [
Tool(
name = "GPT Index",
func=lambda q: str(index.query(q)),
description="useful for when you want to answer questions about the author. The input to this tool should be a complete english sentence.",
return_direct=True
),
]
memory = GPTIndexChatMemory(
index=index,
memory_key="chat_history",
query_kwargs={"response_mode": "compact"},
# return_source returns source nodes instead of querying index
return_source=True,
# return_messages returns context in message format
return_messages=True
)
llm=OpenAI(temperature=0, model_name="text-davinci-003")
st.session_state.agent_chain = initialize_agent(tools, llm, agent="conversational-react-description", memory=memory)
def chat(query,urls):
index_name = get_index_name(urls)
if not os.path.exists("../index/"+index_name+".json"):
return "index not found, click on 'load documents' first"
else: print("index found")
index = GPTSimpleVectorIndex.load_from_disk("../index/"+index_name+".json")
tools = [
Tool(
name = "GPT Index",
func=lambda q: str(index.query(q)),
description="useful for when you want to answer questions about the author. The input to this tool should be a complete english sentence.",
return_direct=True
),
]
memory = GPTIndexChatMemory(
index=index,
memory_key="chat_history",
query_kwargs={"response_mode": "compact"},
# return_source returns source nodes instead of querying index
return_source=True,
# return_messages returns context in message format
return_messages=True
)
llm=OpenAI(temperature=0, model_name="text-davinci-003")
agent_chain = initialize_agent(tools, llm, agent="conversational-react-description", memory=memory)
response = agent_chain.run(input=query)
# response = st.session_state.agent_chain.run(input=query)
return response
# loading openai api key from json file
with open('../keys_and_tokens.json', 'r') as f:
data = json.load(f)
openai_api_key = data.get("openai-api-key")
if openai_api_key:
os.environ["OPENAI_API_KEY"] = openai_api_key
else:
print("openai_api_key not found in JSON file")
st.header("LearnWithGPT Demo")
st.session_state.agent_chain = None
doc_input = st.text_area("Enter a URL to scrape and index")
if st.button("load documents"):
st.markdown(load_documents_to_gpt_vectorstore(doc_input))
build_conversation(doc_input)
user_input = st.text_area("ask about the docs")
if st.button("Ask"):
st.write(chat(user_input, doc_input))
# @st.cache_resource
# def initialize_index(index_name, documents_folder):
# llm_predictor = ChatGPTLLMPredictor()
# service_context = ServiceContext.from_defaults(llm_predictor=llm_predictor)
# if os.path.exists(index_name):
# index = GPTSimpleVectorIndex.load_from_disk(index_name, service_context=service_context)
# else:
# documents = SimpleDirectoryReader(documents_folder).load_data()
# index = GPTSimpleVectorIndex.from_documents(documents, service_context=service_context)
# index.save_to_disk(index_name)
# return index
# @st.cache_data(max_entries=200, persist=True)
# def query_index(_index, query_text):
# response = _index.query(query_text)
# return str(response)
# st.title("🦙 Llama Index Demo 🦙")
# st.header("Welcome to the Llama Index Streamlit Demo")
# st.write("Enter a query about Paul Graham's essays. You can check out the original essay [here](https://raw.githubusercontent.com/jerryjliu/llama_index/main/examples/paul_graham_essay/data/paul_graham_essay.txt). Your query will be answered using the essay as context, using embeddings from text-ada-002 and LLM completions from ChatGPT. You can read more about Llama Index and how this works in [our docs!](https://gpt-index.readthedocs.io/en/latest/index.html)")
# index = None
# api_key = st.text_input("Enter your OpenAI API key here:", type="password")
# if api_key:
# os.environ['OPENAI_API_KEY'] = api_key
# index = initialize_index(index_name, documents_folder)
# if index is None:
# st.warning("Please enter your api key first.")
# text = st.text_input("Query text:", value="What did the author do growing up?")
# if st.button("Run Query") and text is not None:
# response = query_index(index, text)
# st.markdown(response)
# llm_col, embed_col = st.columns(2)
# with llm_col:
# st.markdown(f"LLM Tokens Used: {index.service_context.llm_predictor._last_token_usage}")
# with embed_col:
# st.markdown(f"Embedding Tokens Used: {index.service_context.embed_model._last_token_usage}")
| [
"llama_index.download_loader",
"llama_index.GPTSimpleVectorIndex.load_from_disk",
"llama_index.ServiceContext.from_defaults",
"llama_index.node_parser.SimpleNodeParser",
"llama_index.PromptHelper",
"llama_index.GPTSimpleVectorIndex",
"llama_index.langchain_helpers.memory_wrapper.GPTIndexChatMemory"
] | [((4729, 4759), 'streamlit.header', 'st.header', (['"""LearnWithGPT Demo"""'], {}), "('LearnWithGPT Demo')\n", (4738, 4759), True, 'import streamlit as st\n'), ((4809, 4856), 'streamlit.text_area', 'st.text_area', (['"""Enter a URL to scrape and index"""'], {}), "('Enter a URL to scrape and index')\n", (4821, 4856), True, 'import streamlit as st\n'), ((4860, 4887), 'streamlit.button', 'st.button', (['"""load documents"""'], {}), "('load documents')\n", (4869, 4887), True, 'import streamlit as st\n'), ((5003, 5037), 'streamlit.text_area', 'st.text_area', (['"""ask about the docs"""'], {}), "('ask about the docs')\n", (5015, 5037), True, 'import streamlit as st\n'), ((5041, 5057), 'streamlit.button', 'st.button', (['"""Ask"""'], {}), "('Ask')\n", (5050, 5057), True, 'import streamlit as st\n'), ((1005, 1055), 'os.path.exists', 'os.path.exists', (["('../index/' + index_name + '.json')"], {}), "('../index/' + index_name + '.json')\n", (1019, 1055), False, 'import os\n'), ((1267, 1285), 'scrape_utils.scrape', 'scrape', (['basepoints'], {}), '(basepoints)\n', (1273, 1285), False, 'from scrape_utils import scrape\n'), ((1315, 1356), 'llama_index.download_loader', 'download_loader', (['"""BeautifulSoupWebReader"""'], {}), "('BeautifulSoupWebReader')\n", (1330, 1356), False, 'from llama_index import download_loader, GPTSimpleVectorIndex, SimpleDirectoryReader, ServiceContext, LLMPredictor, PromptHelper\n'), ((1452, 1470), 'llama_index.node_parser.SimpleNodeParser', 'SimpleNodeParser', ([], {}), '()\n', (1468, 1470), False, 'from llama_index.node_parser import SimpleNodeParser\n'), ((1736, 1795), 'llama_index.PromptHelper', 'PromptHelper', (['max_input_size', 'num_output', 'max_chunk_overlap'], {}), '(max_input_size, num_output, max_chunk_overlap)\n', (1748, 1795), False, 'from llama_index import download_loader, GPTSimpleVectorIndex, SimpleDirectoryReader, ServiceContext, LLMPredictor, PromptHelper\n'), ((1818, 1909), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'llm_predictor': 'llm_predictor', 'prompt_helper': 'prompt_helper'}), '(llm_predictor=llm_predictor, prompt_helper=\n prompt_helper)\n', (1846, 1909), False, 'from llama_index import download_loader, GPTSimpleVectorIndex, SimpleDirectoryReader, ServiceContext, LLMPredictor, PromptHelper\n'), ((1917, 1977), 'llama_index.GPTSimpleVectorIndex', 'GPTSimpleVectorIndex', (['nodes'], {'service_context': 'service_context'}), '(nodes, service_context=service_context)\n', (1937, 1977), False, 'from llama_index import download_loader, GPTSimpleVectorIndex, SimpleDirectoryReader, ServiceContext, LLMPredictor, PromptHelper\n'), ((2287, 2358), 'llama_index.GPTSimpleVectorIndex.load_from_disk', 'GPTSimpleVectorIndex.load_from_disk', (["('../index/' + index_name + '.json')"], {}), "('../index/' + index_name + '.json')\n", (2322, 2358), False, 'from llama_index import download_loader, GPTSimpleVectorIndex, SimpleDirectoryReader, ServiceContext, LLMPredictor, PromptHelper\n'), ((2676, 2824), 'llama_index.langchain_helpers.memory_wrapper.GPTIndexChatMemory', 'GPTIndexChatMemory', ([], {'index': 'index', 'memory_key': '"""chat_history"""', 'query_kwargs': "{'response_mode': 'compact'}", 'return_source': '(True)', 'return_messages': '(True)'}), "(index=index, memory_key='chat_history', query_kwargs={\n 'response_mode': 'compact'}, return_source=True, return_messages=True)\n", (2694, 2824), False, 'from llama_index.langchain_helpers.memory_wrapper import GPTIndexChatMemory\n'), ((3007, 3059), 'langchain.OpenAI', 'OpenAI', 
([], {'temperature': '(0)', 'model_name': '"""text-davinci-003"""'}), "(temperature=0, model_name='text-davinci-003')\n", (3013, 3059), False, 'from langchain import OpenAI\n'), ((3095, 3184), 'langchain.agents.initialize_agent', 'initialize_agent', (['tools', 'llm'], {'agent': '"""conversational-react-description"""', 'memory': 'memory'}), "(tools, llm, agent='conversational-react-description',\n memory=memory)\n", (3111, 3184), False, 'from langchain.agents import initialize_agent\n'), ((3410, 3481), 'llama_index.GPTSimpleVectorIndex.load_from_disk', 'GPTSimpleVectorIndex.load_from_disk', (["('../index/' + index_name + '.json')"], {}), "('../index/' + index_name + '.json')\n", (3445, 3481), False, 'from llama_index import download_loader, GPTSimpleVectorIndex, SimpleDirectoryReader, ServiceContext, LLMPredictor, PromptHelper\n'), ((3799, 3947), 'llama_index.langchain_helpers.memory_wrapper.GPTIndexChatMemory', 'GPTIndexChatMemory', ([], {'index': 'index', 'memory_key': '"""chat_history"""', 'query_kwargs': "{'response_mode': 'compact'}", 'return_source': '(True)', 'return_messages': '(True)'}), "(index=index, memory_key='chat_history', query_kwargs={\n 'response_mode': 'compact'}, return_source=True, return_messages=True)\n", (3817, 3947), False, 'from llama_index.langchain_helpers.memory_wrapper import GPTIndexChatMemory\n'), ((4130, 4182), 'langchain.OpenAI', 'OpenAI', ([], {'temperature': '(0)', 'model_name': '"""text-davinci-003"""'}), "(temperature=0, model_name='text-davinci-003')\n", (4136, 4182), False, 'from langchain import OpenAI\n'), ((4201, 4290), 'langchain.agents.initialize_agent', 'initialize_agent', (['tools', 'llm'], {'agent': '"""conversational-react-description"""', 'memory': 'memory'}), "(tools, llm, agent='conversational-react-description',\n memory=memory)\n", (4217, 4290), False, 'from langchain.agents import initialize_agent\n'), ((4528, 4540), 'json.load', 'json.load', (['f'], {}), '(f)\n', (4537, 4540), False, 'import json\n'), ((1097, 1168), 'llama_index.GPTSimpleVectorIndex.load_from_disk', 'GPTSimpleVectorIndex.load_from_disk', (["('../index/' + index_name + '.json')"], {}), "('../index/' + index_name + '.json')\n", (1132, 1168), False, 'from llama_index import download_loader, GPTSimpleVectorIndex, SimpleDirectoryReader, ServiceContext, LLMPredictor, PromptHelper\n'), ((2130, 2180), 'os.path.exists', 'os.path.exists', (["('../index/' + index_name + '.json')"], {}), "('../index/' + index_name + '.json')\n", (2144, 2180), False, 'import os\n'), ((3253, 3303), 'os.path.exists', 'os.path.exists', (["('../index/' + index_name + '.json')"], {}), "('../index/' + index_name + '.json')\n", (3267, 3303), False, 'import os\n'), ((1577, 1629), 'langchain.OpenAI', 'OpenAI', ([], {'temperature': '(0)', 'model_name': '"""text-davinci-003"""'}), "(temperature=0, model_name='text-davinci-003')\n", (1583, 1629), False, 'from langchain import OpenAI\n'), ((828, 847), 'urllib.parse.urlparse', 'urlparse', (['basepoint'], {}), '(basepoint)\n', (836, 847), False, 'from urllib.parse import urlparse\n')] |
from typing import Any, List, Optional, Sequence
from llama_index.core.base.base_query_engine import BaseQueryEngine
from llama_index.core.base.base_retriever import BaseRetriever
from llama_index.core.base.response.schema import RESPONSE_TYPE
from llama_index.core.bridge.pydantic import BaseModel
from llama_index.core.callbacks.base import CallbackManager
from llama_index.core.callbacks.schema import CBEventType, EventPayload
from llama_index.core.llms.llm import LLM
from llama_index.core.postprocessor.types import BaseNodePostprocessor
from llama_index.core.prompts import BasePromptTemplate
from llama_index.core.prompts.mixin import PromptMixinType
from llama_index.core.response_synthesizers import (
BaseSynthesizer,
ResponseMode,
get_response_synthesizer,
)
from llama_index.core.schema import NodeWithScore, QueryBundle
from llama_index.core.service_context import ServiceContext
from llama_index.core.settings import (
Settings,
callback_manager_from_settings_or_context,
llm_from_settings_or_context,
)
class RetrieverQueryEngine(BaseQueryEngine):
"""Retriever query engine.
Args:
retriever (BaseRetriever): A retriever object.
response_synthesizer (Optional[BaseSynthesizer]): A BaseSynthesizer
object.
callback_manager (Optional[CallbackManager]): A callback manager.
"""
def __init__(
self,
retriever: BaseRetriever,
response_synthesizer: Optional[BaseSynthesizer] = None,
node_postprocessors: Optional[List[BaseNodePostprocessor]] = None,
callback_manager: Optional[CallbackManager] = None,
) -> None:
self._retriever = retriever
self._response_synthesizer = response_synthesizer or get_response_synthesizer(
llm=llm_from_settings_or_context(Settings, retriever.get_service_context()),
callback_manager=callback_manager
or callback_manager_from_settings_or_context(
Settings, retriever.get_service_context()
),
)
self._node_postprocessors = node_postprocessors or []
callback_manager = (
callback_manager or self._response_synthesizer.callback_manager
)
for node_postprocessor in self._node_postprocessors:
node_postprocessor.callback_manager = callback_manager
super().__init__(callback_manager=callback_manager)
def _get_prompt_modules(self) -> PromptMixinType:
"""Get prompt sub-modules."""
return {"response_synthesizer": self._response_synthesizer}
@classmethod
def from_args(
cls,
retriever: BaseRetriever,
llm: Optional[LLM] = None,
response_synthesizer: Optional[BaseSynthesizer] = None,
node_postprocessors: Optional[List[BaseNodePostprocessor]] = None,
# response synthesizer args
response_mode: ResponseMode = ResponseMode.COMPACT,
text_qa_template: Optional[BasePromptTemplate] = None,
refine_template: Optional[BasePromptTemplate] = None,
summary_template: Optional[BasePromptTemplate] = None,
simple_template: Optional[BasePromptTemplate] = None,
output_cls: Optional[BaseModel] = None,
use_async: bool = False,
streaming: bool = False,
# deprecated
service_context: Optional[ServiceContext] = None,
**kwargs: Any,
) -> "RetrieverQueryEngine":
"""Initialize a RetrieverQueryEngine object.".
Args:
retriever (BaseRetriever): A retriever object.
service_context (Optional[ServiceContext]): A ServiceContext object.
node_postprocessors (Optional[List[BaseNodePostprocessor]]): A list of
node postprocessors.
verbose (bool): Whether to print out debug info.
response_mode (ResponseMode): A ResponseMode object.
text_qa_template (Optional[BasePromptTemplate]): A BasePromptTemplate
object.
refine_template (Optional[BasePromptTemplate]): A BasePromptTemplate object.
simple_template (Optional[BasePromptTemplate]): A BasePromptTemplate object.
use_async (bool): Whether to use async.
streaming (bool): Whether to use streaming.
optimizer (Optional[BaseTokenUsageOptimizer]): A BaseTokenUsageOptimizer
object.
"""
llm = llm or llm_from_settings_or_context(Settings, service_context)
response_synthesizer = response_synthesizer or get_response_synthesizer(
llm=llm,
service_context=service_context,
text_qa_template=text_qa_template,
refine_template=refine_template,
summary_template=summary_template,
simple_template=simple_template,
response_mode=response_mode,
output_cls=output_cls,
use_async=use_async,
streaming=streaming,
)
callback_manager = callback_manager_from_settings_or_context(
Settings, service_context
)
return cls(
retriever=retriever,
response_synthesizer=response_synthesizer,
callback_manager=callback_manager,
node_postprocessors=node_postprocessors,
)
def _apply_node_postprocessors(
self, nodes: List[NodeWithScore], query_bundle: QueryBundle
) -> List[NodeWithScore]:
for node_postprocessor in self._node_postprocessors:
nodes = node_postprocessor.postprocess_nodes(
nodes, query_bundle=query_bundle
)
return nodes
def retrieve(self, query_bundle: QueryBundle) -> List[NodeWithScore]:
nodes = self._retriever.retrieve(query_bundle)
return self._apply_node_postprocessors(nodes, query_bundle=query_bundle)
async def aretrieve(self, query_bundle: QueryBundle) -> List[NodeWithScore]:
nodes = await self._retriever.aretrieve(query_bundle)
return self._apply_node_postprocessors(nodes, query_bundle=query_bundle)
def with_retriever(self, retriever: BaseRetriever) -> "RetrieverQueryEngine":
return RetrieverQueryEngine(
retriever=retriever,
response_synthesizer=self._response_synthesizer,
callback_manager=self.callback_manager,
node_postprocessors=self._node_postprocessors,
)
def synthesize(
self,
query_bundle: QueryBundle,
nodes: List[NodeWithScore],
additional_source_nodes: Optional[Sequence[NodeWithScore]] = None,
) -> RESPONSE_TYPE:
return self._response_synthesizer.synthesize(
query=query_bundle,
nodes=nodes,
additional_source_nodes=additional_source_nodes,
)
async def asynthesize(
self,
query_bundle: QueryBundle,
nodes: List[NodeWithScore],
additional_source_nodes: Optional[Sequence[NodeWithScore]] = None,
) -> RESPONSE_TYPE:
return await self._response_synthesizer.asynthesize(
query=query_bundle,
nodes=nodes,
additional_source_nodes=additional_source_nodes,
)
def _query(self, query_bundle: QueryBundle) -> RESPONSE_TYPE:
"""Answer a query."""
with self.callback_manager.event(
CBEventType.QUERY, payload={EventPayload.QUERY_STR: query_bundle.query_str}
) as query_event:
nodes = self.retrieve(query_bundle)
response = self._response_synthesizer.synthesize(
query=query_bundle,
nodes=nodes,
)
query_event.on_end(payload={EventPayload.RESPONSE: response})
return response
async def _aquery(self, query_bundle: QueryBundle) -> RESPONSE_TYPE:
"""Answer a query."""
with self.callback_manager.event(
CBEventType.QUERY, payload={EventPayload.QUERY_STR: query_bundle.query_str}
) as query_event:
nodes = await self.aretrieve(query_bundle)
response = await self._response_synthesizer.asynthesize(
query=query_bundle,
nodes=nodes,
)
query_event.on_end(payload={EventPayload.RESPONSE: response})
return response
@property
def retriever(self) -> BaseRetriever:
"""Get the retriever object."""
return self._retriever
| [
"llama_index.core.settings.llm_from_settings_or_context",
"llama_index.core.settings.callback_manager_from_settings_or_context",
"llama_index.core.response_synthesizers.get_response_synthesizer"
] | [((4987, 5055), 'llama_index.core.settings.callback_manager_from_settings_or_context', 'callback_manager_from_settings_or_context', (['Settings', 'service_context'], {}), '(Settings, service_context)\n', (5028, 5055), False, 'from llama_index.core.settings import Settings, callback_manager_from_settings_or_context, llm_from_settings_or_context\n'), ((4419, 4474), 'llama_index.core.settings.llm_from_settings_or_context', 'llm_from_settings_or_context', (['Settings', 'service_context'], {}), '(Settings, service_context)\n', (4447, 4474), False, 'from llama_index.core.settings import Settings, callback_manager_from_settings_or_context, llm_from_settings_or_context\n'), ((4531, 4843), 'llama_index.core.response_synthesizers.get_response_synthesizer', 'get_response_synthesizer', ([], {'llm': 'llm', 'service_context': 'service_context', 'text_qa_template': 'text_qa_template', 'refine_template': 'refine_template', 'summary_template': 'summary_template', 'simple_template': 'simple_template', 'response_mode': 'response_mode', 'output_cls': 'output_cls', 'use_async': 'use_async', 'streaming': 'streaming'}), '(llm=llm, service_context=service_context,\n text_qa_template=text_qa_template, refine_template=refine_template,\n summary_template=summary_template, simple_template=simple_template,\n response_mode=response_mode, output_cls=output_cls, use_async=use_async,\n streaming=streaming)\n', (4555, 4843), False, 'from llama_index.core.response_synthesizers import BaseSynthesizer, ResponseMode, get_response_synthesizer\n')] |
import os
from starlette.concurrency import run_in_threadpool
import google.generativeai as genai
from llama_index.llms.gemini import Gemini
from llama_index.embeddings import GooglePaLMEmbedding
from gateway_llms.app.utils.logs import LogApplication, log_function
GOOGLE_API_KEY = os.getenv("GOOGLE_API_KEY")
genai.configure(api_key=GOOGLE_API_KEY)
@log_function
async def transform_history(history: list, log_user: LogApplication):
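    # Convert OpenAI-style {role, content} messages to Gemini's {role, parts} format and map "assistant" to "model"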
for message in history:
message.update({"parts": [{"text": message.get("content")}]})
del message["content"]
if message.get("role") == "assistant":
message.update({"role": "model"})
return history
@log_function
async def gemini_chat_completion(
message: str,
system: str,
history: list,
generation_config: dict,
safety_settings: dict,
log_user: LogApplication
):
model = genai.GenerativeModel('gemini-pro')
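    # Deliver the system prompt as a seeded exchange: the user asks "Quem é você?" ("Who are you?") and the system text becomes the model's prior reply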
history.insert(0, {"role": "user", "content": "Quem é você?"})
history.insert(1, {"role": "assistant", "content": system})
chat_history = await transform_history(history, log_user)
chat = model.start_chat(history=chat_history)
response = await chat.send_message_async(
message,
generation_config=generation_config,
safety_settings=safety_settings
)
return response.text
@log_function
async def gemini_embeddings(
text: str,
log_user: LogApplication
):
response = await run_in_threadpool(
genai.embed_content,
'models/embedding-001',
text
)
return response.get("embedding")
@log_function
def get_embbeding_model(log_user: LogApplication):
model_name = "models/embedding-gecko-001"
return GooglePaLMEmbedding(
model_name=model_name,
api_key=GOOGLE_API_KEY,
)
@log_function
def get_chat_model(generation_config: dict, safety_settings: dict, log_user: LogApplication):
temperature = 0.1 if not generation_config.get(
"temperature"
) else generation_config.get(
"temperature"
)
return Gemini(
GOOGLE_API_KEY,
temperature=temperature,
max_tokens=None,
generation_config=generation_config,
safety_settings=safety_settings
)
| [
"llama_index.embeddings.GooglePaLMEmbedding",
"llama_index.llms.gemini.Gemini"
] | [((285, 312), 'os.getenv', 'os.getenv', (['"""GOOGLE_API_KEY"""'], {}), "('GOOGLE_API_KEY')\n", (294, 312), False, 'import os\n'), ((314, 353), 'google.generativeai.configure', 'genai.configure', ([], {'api_key': 'GOOGLE_API_KEY'}), '(api_key=GOOGLE_API_KEY)\n', (329, 353), True, 'import google.generativeai as genai\n'), ((887, 922), 'google.generativeai.GenerativeModel', 'genai.GenerativeModel', (['"""gemini-pro"""'], {}), "('gemini-pro')\n", (908, 922), True, 'import google.generativeai as genai\n'), ((1725, 1791), 'llama_index.embeddings.GooglePaLMEmbedding', 'GooglePaLMEmbedding', ([], {'model_name': 'model_name', 'api_key': 'GOOGLE_API_KEY'}), '(model_name=model_name, api_key=GOOGLE_API_KEY)\n', (1744, 1791), False, 'from llama_index.embeddings import GooglePaLMEmbedding\n'), ((2073, 2211), 'llama_index.llms.gemini.Gemini', 'Gemini', (['GOOGLE_API_KEY'], {'temperature': 'temperature', 'max_tokens': 'None', 'generation_config': 'generation_config', 'safety_settings': 'safety_settings'}), '(GOOGLE_API_KEY, temperature=temperature, max_tokens=None,\n generation_config=generation_config, safety_settings=safety_settings)\n', (2079, 2211), False, 'from llama_index.llms.gemini import Gemini\n'), ((1463, 1531), 'starlette.concurrency.run_in_threadpool', 'run_in_threadpool', (['genai.embed_content', '"""models/embedding-001"""', 'text'], {}), "(genai.embed_content, 'models/embedding-001', text)\n", (1480, 1531), False, 'from starlette.concurrency import run_in_threadpool\n')] |