date_collected (string, 1 distinct value) | repo_name (string, 6–116 chars) | file_name (string, 2–220 chars) | file_contents (string, 13–357k chars) | prompts (sequence) |
---|---|---|---|---|
2024-01-10 | DeNeutoy/bert-syntax | eval_openai_gpt.py | # coding=utf-8
from pytorch_pretrained_bert import OpenAIGPTLMHeadModel, OpenAIGPTTokenizer, BertTokenizer
import torch
import sys
import csv
import logging
import itertools
logging.basicConfig(level=logging.INFO)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model_name = "openai-gpt"
print("using model: {}".format(model_name), file=sys.stderr)
split_words = True
if 'no_split' in sys.argv:
split_words = False
print("We don't split words", file=sys.stderr)
use_postfix = False
if 'use_postfix' in sys.argv:
use_postfix = True
print("We compute probabilities over the entire sentence", file=sys.stderr)
model = OpenAIGPTLMHeadModel.from_pretrained(model_name)
tokenizer = OpenAIGPTTokenizer.from_pretrained(model_name)
bert_tokenizer=BertTokenizer.from_pretrained('bert-base-uncased')
model.eval()
model.to(device)
def get_probs_for_words(sent, w1, w2):
pre, target, post = sent.split("***")
if "mask" in target.lower():
target = ["[MASK]"]
else:
target = tokenizer.tokenize(target)
tokens = tokenizer.tokenize(pre)
target_idx = len(tokens)
# Filter answers based on BERT wordpieces to align with BERT results
try:
word_ids=bert_tokenizer.convert_tokens_to_ids([w1,w2])
except KeyError:
print("skipping",w1,w2,"bad wins")
return None
tok_w1, tok_w2 = tokenizer.tokenize(w1), tokenizer.tokenize(w2)
input_ids = tokenizer.convert_tokens_to_ids(tokens)
w1_ids = tokenizer.convert_tokens_to_ids(tok_w1)
w2_ids = tokenizer.convert_tokens_to_ids(tok_w2)
if len(input_ids) == 0:
print("skipping",pre,w1,w2,"empty beggingin")
return None
if not split_words and (len(tok_w1) > 1 or len(tok_w2) > 1):
print("skipping",pre,w1,w2,"splitted words")
return None
if use_postfix:
# Add post focus tokens
end_tokens = tokenizer.tokenize(post)
end_ids = tokenizer.convert_tokens_to_ids(end_tokens)
w1_ids += end_ids
w2_ids += end_ids
# Compute the score for w1 and w2
add_tok_w1 = []
add_tok_w2 = []
score_w1 = 0
score_w2 = 0
for ids_w1, ids_w2 in itertools.zip_longest(w1_ids, w2_ids):
tens = torch.LongTensor([input_ids + add_tok_w1, input_ids + add_tok_w2]).to(device)
with torch.no_grad():
res = model(tens)
res = res[..., 0:model.config.vocab_size] # Restrict to the vocabulary only
res = torch.nn.functional.log_softmax(res, dim=-1)
if ids_w1 is not None:
score_w1 = score_w1 + res[0, -1, ids_w1].item()
if ids_w2 is not None:
score_w2 = score_w2 + res[1, -1, ids_w2].item()
add_tok_w1.append(ids_w1 if ids_w1 is not None else 0)  # pad with token id 0 so both context rows stay the same length
add_tok_w2.append(ids_w2 if ids_w2 is not None else 0)
# Compute the score for w2
# add_tok = []
# score_w2 = 0
# for ids in w2_ids:
# tens = torch.LongTensor(input_ids + add_tok).unsqueeze(0).to(device)
# with torch.no_grad():
# res = model(tens)
# res = res[..., 0:model.config.vocab_size] # Restrict to the vocabulary only
# res = torch.nn.functional.log_softmax(res,dim=-1)
# score_w2 = score_w2 + res[0, -1, ids]
# add_tok.append(ids)
return [float(score_w1), float(score_w2)]
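# Illustrative usage only (hypothetical sentence, not from the real dataset):
#   scores = get_probs_for_words("the dog ***mask*** barking", "is", "are")
# 'scores' is [log P("is" | prefix), log P("are" | prefix)], summed over sub-word pieces
# (plus the post-target context when 'use_postfix' is on), or None if the pair was skipped
# (empty prefix, unknown words, or multi-piece words when word splitting is disabled).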
from collections import Counter
def load_marvin():
cc = Counter()
# note: I edited the LM_Syneval/src/make_templates.py script and ran "python LM_Syneval/src/make_templates.py LM_Syneval/data/templates/ > marvin_linzen_dataset.tsv"
out = []
for line in open("marvin_linzen_dataset.tsv"):
case = line.strip().split("\t")
cc[case[1]] += 1
g, ug = case[-2], case[-1]
g = g.split()
ug = ug.split()
assert len(g) == len(ug), (g, ug)
diffs = [i for i, pair in enumerate(zip(g, ug)) if pair[0] != pair[1]]
if len(diffs) != 1:
# print(diffs)
# print(g,ug)
continue
assert len(diffs) == 1, diffs
gv = g[diffs[0]] # good
ugv = ug[diffs[0]] # bad
g[diffs[0]] = "***mask***"
g.append(".")
out.append((case[0], case[1], " ".join(g), gv, ugv))
return out
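# For reference, each tuple appended to `out` has the shape
# (case, construction type, masked sentence, grammatical form, ungrammatical form), e.g.
#   ("sv_agreement", "simple", "the author ***mask*** here .", "laughs", "laugh")
# (values here are hypothetical, not taken from the real marvin_linzen_dataset.tsv).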
def eval_marvin():
o = load_marvin()
print(len(o), file=sys.stderr)
from collections import defaultdict
import time
rc = defaultdict(Counter)
tc = Counter()
start = time.time()
for i, (case, tp, s, g, b) in enumerate(o):
ps = get_probs_for_words(s, g, b)
if ps is None:
ps = [0, 1]
gp = ps[0]
bp = ps[1]
print(gp > bp, case, tp, g, b, s)
if i % 100 == 0:
print(i, time.time() - start, file=sys.stderr)
start = time.time()
sys.stdout.flush()
def eval_lgd():
for i, line in enumerate(open("lgd_dataset.tsv", encoding="utf8")):
# for i,line in enumerate(open("lgd_dataset_with_is_are.tsv",encoding="utf8")):
na, _, masked, good, bad = line.strip().split("\t")
ps = get_probs_for_words(masked, good, bad)
if ps is None:
continue
gp = ps[0]
bp = ps[1]
print(str(gp > bp), na, good, gp, bad, bp, masked.encode("utf8"), sep=u"\t")
if i % 100 == 0:
print(i, file=sys.stderr)
sys.stdout.flush()
def read_gulordava():
rows = csv.DictReader(open("generated.tab", encoding="utf8"), delimiter="\t")
data = []
for row in rows:
row2 = next(rows)
assert row["sent"] == row2["sent"]
assert row["class"] == "correct"
assert row2["class"] == "wrong"
sent = row["sent"].lower().split()[:-1] # dump the <eos> token.
good_form = row["form"]
bad_form = row2["form"]
sent[int(row["len_prefix"])] = "***mask***"
sent = " ".join(sent)
data.append((sent, row["n_attr"], good_form, bad_form))
return data
def eval_gulordava():
for i, (masked, natt, good, bad) in enumerate(read_gulordava()):
if good in ["is", "are"]:
print("skipping is/are")
continue
ps = get_probs_for_words(masked, good, bad)
if ps is None:
continue
gp = ps[0]
bp = ps[1]
print(str(gp > bp), natt, good, gp, bad, bp, masked.encode("utf8"), sep=u"\t")
if i % 100 == 0:
print(i, file=sys.stderr)
sys.stdout.flush()
if "marvin" in sys.argv:
eval_marvin()
elif "gul" in sys.argv:
eval_gulordava()
else:
eval_lgd()
| [] |
2024-01-10 | jojowither/Taiwan-Stock-Knowledge-Graph | chat_app~text2cypher.py | import os
import openai
from retry import retry
from dotenv import load_dotenv
from training import examples
load_dotenv()
openai.api_key = os.getenv('OPENAI_KEY')
openai_model = os.getenv('OPENAI_MODEL')
system = f"""
你是一個能夠基於 Cypher queries 示例生成 Cypher queries的助手。示例 Cypher queries為:\n {examples} \n
除了 Cypher queries之外,請不要回答任何解釋或其他訊息,這點務必注意。
你不需要道歉,嚴格根據提供的 Cypher queries示例生成 Cypher 語句。
不提供任何無法從 Cypher queries示例推斷出來的 Cypher 語句。
當缺乏對話內容的上下文而無法推斷出 Cypher 語句時,請告知用戶缺少的上下文並說明原因。
只需要生成Cypher queries,不要其他文字訊息。
"""
@retry(tries=2, delay=5)
def generate_cypher(messages):
messages = [
{"role": "system", "content": system}
] + messages
print(messages)
# Make a request to OpenAI
completions = openai.ChatCompletion.create(
model=openai_model,
messages=messages,
temperature=0.0
)
response = completions.choices[0].message.content
# Sometimes the model bypasses the system prompt and returns
# data based on the previous dialogue history
if not "MATCH" in response and "{" in response:
raise Exception(
"GPT繞過了系統訊息,根據以前的對話記錄回覆。 " + response)
# If the model apologized, remove the word
if "抱歉" in response:
response = " ".join(response[2:])
# Sometimes the model adds quotes around the Cypher when it wants to explain things
if "`" in response:
response = response.split("```")[1].strip("`")
print(response)
return response
if __name__ == '__main__':
generate_cypher([{'role': 'user', 'content': '查詢股票代碼2330基本資訊'}])
generate_cypher([{'role': 'user', 'content': '列出廣達副總經理持股張數的基本統計'}]) | [
"查詢股票代碼2330基本資訊",
"[\n {\"role\": \"system\", \"content\": system}\n ] + messages",
"\n你是一個能夠基於 Cypher queries 示例生成 Cypher queries的助手。示例 Cypher queries為:\n PLACEHOLDER \n\n除了 Cypher queries之外,請不要回答任何解釋或其他訊息,這點務必注意。\n你不需要道歉,嚴格根據提供的 Cypher queries示例生成 Cypher 語句。\n不提供任何無法從 Cypher queries示例推斷出來的 Cypher 語句。\n當缺乏對話內容的上下文而無法推斷出 Cypher 語句時,請告知用戶缺少的上下文並說明原因。\n只需要生成Cypher queries,不要其他文字訊息。\n",
"列出廣達副總經理持股張數的基本統計"
] |
2024-01-10 | jojowither/Taiwan-Stock-Knowledge-Graph | chat_app~graph2text.py | import os
import openai
from dotenv import load_dotenv
load_dotenv()
openai.api_key = os.getenv('OPENAI_KEY')
openai_model = os.getenv('OPENAI_MODEL')
system = f"""
你是一個助手,可以根據給定的訊息生成文本,以形成人們易於理解的答案。
最新的提示包含訊息,你需要基於給定的訊息生成人類可讀的回應。
讓它的訊息回覆看起來像來自一個 AI 助手,但不要添加任何額外訊息。
不要添加任何額外的訊息,除非最新的提示中有明確提供。
我再次強調,不要添加任何未明確給定的訊息。
"""
def generate_response(messages):
messages = [
{"role": "system", "content": system}
] + messages
print(messages)
# Make a request to OpenAI
completions = openai.ChatCompletion.create(
model=openai_model,
messages=messages,
temperature=0.0
)
response = completions.choices[0].message.content
print(response)
# If the model apologized, remove the word
if "抱歉" in response:
response = " ".join(response[2:])
return response
if __name__ == '__main__':
data = [{"market":"上市","code":"2330","name":"台積電","pagerank":0.002635887482379148,"community":0,"stock_id":"2330"}]
print(generate_response([{'role': 'user', 'content': str(data)}]))
| [
"\n你是一個助手,可以根據給定的訊息生成文本,以形成人們易於理解的答案。\n最新的提示包含訊息,你需要基於給定的訊息生成人類可讀的回應。\n讓它的訊息回覆看起來像來自一個 AI 助手,但不要添加任何額外訊息。\n不要添加任何額外的訊息,除非最新的提示中有明確提供。\n我再次強調,不要添加任何未明確給定的訊息。\n",
"[\n {\"role\": \"system\", \"content\": system}\n ] + messages"
] |
2024-01-10 | rock-learning/bolero | bolero~environment~test~test_openaigym.py | import numpy as np
from bolero.environment import OpenAiGym, gym_available
if not gym_available:
from nose import SkipTest
raise SkipTest("gym is not installed")
from nose.tools import assert_equal, assert_true
def test_discrete_input():
env = OpenAiGym("CartPole-v0")
env.init()
assert_equal(env.get_num_inputs(), 1)
assert_equal(env.get_num_outputs(), 4)
inputs = np.zeros(env.get_num_inputs())
outputs = np.zeros(env.get_num_outputs())
env.reset()
env.get_outputs(outputs)
i = 0
while not env.is_evaluation_done():
env.set_inputs(inputs)
env.step_action()
env.get_outputs(outputs)
i += 1
assert_true(env.is_evaluation_done())
feedback = env.get_feedback()
assert_equal(len(feedback), i)
assert_equal(env.get_maximum_feedback(), 195.0)
def test_box_input():
env = OpenAiGym("Pendulum-v0")
env.init()
assert_equal(env.get_num_inputs(), 1)
assert_equal(env.get_num_outputs(), 3)
inputs = np.zeros(env.get_num_inputs())
outputs = np.zeros(env.get_num_outputs())
env.reset()
env.get_outputs(outputs)
i = 0
while not env.is_evaluation_done():
env.set_inputs(inputs)
env.step_action()
env.get_outputs(outputs)
i += 1
assert_true(env.is_evaluation_done())
feedback = env.get_feedback()
assert_equal(len(feedback), i)
assert_equal(env.get_maximum_feedback(), float("inf"))
| [] |
2024-01-10 | rock-learning/bolero | examples~behavior_search~plot_cart_pole.py | """
=========
Cart Pole
=========
This is an example of how to use the Cart Pole environment from OpenAI Gym
via the wrapper that is provided with BOLeRo. A linear policy is sufficient
to solve the problem, and policy search algorithms usually work very well in
this domain.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from bolero.environment import OpenAiGym
from bolero.behavior_search import BlackBoxSearch
from bolero.optimizer import CMAESOptimizer
from bolero.representation import LinearBehavior
from bolero.controller import Controller
beh = LinearBehavior()
env = OpenAiGym("CartPole-v0", render=False, seed=0)
opt = CMAESOptimizer(variance=10.0 ** 2, random_state=0)
bs = BlackBoxSearch(beh, opt)
controller = Controller(environment=env, behavior_search=bs, n_episodes=300)
rewards = controller.learn()
controller.episode_with(bs.get_best_behavior())
plt.figure()
ax = plt.subplot(111)
ax.set_title("Optimization progress")
ax.plot(rewards)
ax.set_xlabel("Episode")
ax.set_ylabel("Reward")
ax.set_ylim(-10, 210)
plt.show()
| [] |
2024-01-10 | rock-learning/bolero | bolero~behavior_search~test~test_monte_carlo_rl.py | from bolero.environment import OpenAiGym, gym_available
if not gym_available:
from nose import SkipTest
raise SkipTest("gym is not installed")
from bolero.behavior_search import MonteCarloRL
from bolero.controller import Controller
from nose.tools import assert_less, assert_equal
from nose import SkipTest
def test_mc_rl():
env = OpenAiGym("FrozenLake-v0", render=False, seed=1)
try:
env.init()
except ImportError:
raise SkipTest("gym is not installed")
bs = MonteCarloRL(env.get_discrete_action_space(), random_state=1)
ctrl = Controller(environment=env, behavior_search=bs, n_episodes=10000,
finish_after_convergence=True)
returns = ctrl.learn()
assert_less(len(returns), 1000)
beh = bs.get_best_behavior()
rewards = ctrl.episode_with(beh)
assert_equal(sum(rewards), 1.0) | [] |
2024-01-10 | rock-learning/bolero | examples~behavior_search~plot_discrete_problem.py | """
================
Discrete Problem
================
A simple problem with a discrete state and action space is solved with
a tabular reinforcement learning algorithm. The plot shows the obtained return
for each episode. Successful episodes terminate with the return 1, otherwise
the return is 0. The learning process is stopped when the value function
converged.
"""
print(__doc__)
import matplotlib.pyplot as plt
from bolero.environment import OpenAiGym
from bolero.behavior_search import MonteCarloRL
from bolero.controller import Controller
env = OpenAiGym("FrozenLake-v0", render=False, seed=1)
env.init()
bs = MonteCarloRL(env.get_discrete_action_space(), random_state=1)
ctrl = Controller(environment=env, behavior_search=bs, n_episodes=10000,
finish_after_convergence=True)
rewards = ctrl.learn()
plt.figure()
ax = plt.subplot(111)
ax.set_title("Learning progress")
ax.plot(rewards)
ax.set_xlabel("Episode")
ax.set_ylabel("Reward")
plt.show()
| [] |
2024-01-10 | cfa532/langchain | langchain~vectorstores~elastic_vector_search.py | """Wrapper around Elasticsearch vector database."""
from __future__ import annotations
import uuid
from abc import ABC
from typing import (
TYPE_CHECKING,
Any,
Dict,
Iterable,
List,
Mapping,
Optional,
Tuple,
Union,
)
from langchain.docstore.document import Document
from langchain.embeddings.base import Embeddings
from langchain.utils import get_from_env
from langchain.vectorstores.base import VectorStore
if TYPE_CHECKING:
from elasticsearch import Elasticsearch
def _default_text_mapping(dim: int) -> Dict:
return {
"properties": {
"text": {"type": "text"},
"vector": {"type": "dense_vector", "dims": dim},
}
}
def _default_script_query(query_vector: List[float], filter: Optional[dict]) -> Dict:
if filter:
((key, value),) = filter.items()
filter = {"match": {f"metadata.{key}.keyword": f"{value}"}}
else:
filter = {"match_all": {}}
return {
"script_score": {
"query": filter,
"script": {
"source": "cosineSimilarity(params.query_vector, 'vector') + 1.0",
"params": {"query_vector": query_vector},
},
}
}
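# A sketch of what this helper returns for filter={"source": "news"} (hypothetical values):
# {
#     "script_score": {
#         "query": {"match": {"metadata.source.keyword": "news"}},
#         "script": {
#             "source": "cosineSimilarity(params.query_vector, 'vector') + 1.0",
#             "params": {"query_vector": [0.12, -0.03, ...]},
#         },
#     }
# }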
# ElasticVectorSearch is a concrete implementation of the abstract base class
# VectorStore, which defines a common interface for all vector database
# implementations. By inheriting from the ABC class, ElasticVectorSearch can be
# defined as an abstract base class itself, allowing the creation of subclasses with
# their own specific implementations. If you plan to subclass ElasticVectorSearch,
# you can inherit from it and define your own implementation of the necessary methods
# and attributes.
class ElasticVectorSearch(VectorStore, ABC):
"""Wrapper around Elasticsearch as a vector database.
To connect to an Elasticsearch instance that does not require
login credentials, pass the Elasticsearch URL and index name along with the
embedding object to the constructor.
Example:
.. code-block:: python
from langchain import ElasticVectorSearch
from langchain.embeddings import OpenAIEmbeddings
embedding = OpenAIEmbeddings()
elastic_vector_search = ElasticVectorSearch(
elasticsearch_url="http://localhost:9200",
index_name="test_index",
embedding=embedding
)
To connect to an Elasticsearch instance that requires login credentials,
including Elastic Cloud, use the Elasticsearch URL format
https://username:password@es_host:9243. For example, to connect to Elastic
Cloud, create the Elasticsearch URL with the required authentication details and
pass it to the ElasticVectorSearch constructor as the named parameter
elasticsearch_url.
You can obtain your Elastic Cloud URL and login credentials by logging in to the
Elastic Cloud console at https://cloud.elastic.co, selecting your deployment, and
navigating to the "Deployments" page.
To obtain your Elastic Cloud password for the default "elastic" user:
1. Log in to the Elastic Cloud console at https://cloud.elastic.co
2. Go to "Security" > "Users"
3. Locate the "elastic" user and click "Edit"
4. Click "Reset password"
5. Follow the prompts to reset the password
The format for Elastic Cloud URLs is
https://username:password@cluster_id.region_id.gcp.cloud.es.io:9243.
Example:
.. code-block:: python
from langchain import ElasticVectorSearch
from langchain.embeddings import OpenAIEmbeddings
embedding = OpenAIEmbeddings()
elastic_host = "cluster_id.region_id.gcp.cloud.es.io"
elasticsearch_url = f"https://username:password@{elastic_host}:9243"
elastic_vector_search = ElasticVectorSearch(
elasticsearch_url=elasticsearch_url,
index_name="test_index",
embedding=embedding
)
Args:
elasticsearch_url (str): The URL for the Elasticsearch instance.
index_name (str): The name of the Elasticsearch index for the embeddings.
embedding (Embeddings): An object that provides the ability to embed text.
It should be an instance of a class that subclasses the Embeddings
abstract base class, such as OpenAIEmbeddings()
Raises:
ValueError: If the elasticsearch python package is not installed.
"""
def __init__(
self,
elasticsearch_url: str,
index_name: str,
embedding: Embeddings,
*,
ssl_verify: Optional[Dict[str, Any]] = None,
):
"""Initialize with necessary components."""
try:
import elasticsearch
except ImportError:
raise ImportError(
"Could not import elasticsearch python package. "
"Please install it with `pip install elasticsearch`."
)
self.embedding = embedding
self.index_name = index_name
_ssl_verify = ssl_verify or {}
try:
self.client = elasticsearch.Elasticsearch(elasticsearch_url, **_ssl_verify)
except ValueError as e:
raise ValueError(
f"Your elasticsearch client string is mis-formatted. Got error: {e} "
)
def add_texts(
self,
texts: Iterable[str],
metadatas: Optional[List[dict]] = None,
ids: Optional[List[str]] = None,
refresh_indices: bool = True,
**kwargs: Any,
) -> List[str]:
"""Run more texts through the embeddings and add to the vectorstore.
Args:
texts: Iterable of strings to add to the vectorstore.
metadatas: Optional list of metadatas associated with the texts.
ids: Optional list of unique IDs.
refresh_indices: bool to refresh ElasticSearch indices
Returns:
List of ids from adding the texts into the vectorstore.
"""
try:
from elasticsearch.exceptions import NotFoundError
from elasticsearch.helpers import bulk
except ImportError:
raise ImportError(
"Could not import elasticsearch python package. "
"Please install it with `pip install elasticsearch`."
)
requests = []
ids = ids or [str(uuid.uuid4()) for _ in texts]
embeddings = self.embedding.embed_documents(list(texts))
dim = len(embeddings[0])
mapping = _default_text_mapping(dim)
# check to see if the index already exists
try:
self.client.indices.get(index=self.index_name)
except NotFoundError:
# TODO would be nice to create index before embedding,
# just to save expensive steps for last
self.create_index(self.client, self.index_name, mapping)
for i, text in enumerate(texts):
metadata = metadatas[i] if metadatas else {}
request = {
"_op_type": "index",
"_index": self.index_name,
"vector": embeddings[i],
"text": text,
"metadata": metadata,
"_id": ids[i],
}
requests.append(request)
bulk(self.client, requests)
if refresh_indices:
self.client.indices.refresh(index=self.index_name)
return ids
def similarity_search(
self, query: str, k: int = 4, filter: Optional[dict] = None, **kwargs: Any
) -> List[Document]:
"""Return docs most similar to query.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
Returns:
List of Documents most similar to the query.
"""
docs_and_scores = self.similarity_search_with_score(query, k, filter=filter)
documents = [d[0] for d in docs_and_scores]
return documents
def similarity_search_with_score(
self, query: str, k: int = 4, filter: Optional[dict] = None, **kwargs: Any
) -> List[Tuple[Document, float]]:
"""Return docs most similar to query.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
Returns:
List of Documents most similar to the query.
"""
embedding = self.embedding.embed_query(query)
script_query = _default_script_query(embedding, filter)
response = self.client_search(
self.client, self.index_name, script_query, size=k
)
hits = [hit for hit in response["hits"]["hits"]]
docs_and_scores = [
(
Document(
page_content=hit["_source"]["text"],
metadata=hit["_source"]["metadata"],
),
hit["_score"],
)
for hit in hits
]
return docs_and_scores
@classmethod
def from_texts(
cls,
texts: List[str],
embedding: Embeddings,
metadatas: Optional[List[dict]] = None,
ids: Optional[List[str]] = None,
elasticsearch_url: Optional[str] = None,
index_name: Optional[str] = None,
refresh_indices: bool = True,
**kwargs: Any,
) -> ElasticVectorSearch:
"""Construct ElasticVectorSearch wrapper from raw documents.
This is a user-friendly interface that:
1. Embeds documents.
2. Creates a new index for the embeddings in the Elasticsearch instance.
3. Adds the documents to the newly created Elasticsearch index.
This is intended to be a quick way to get started.
Example:
.. code-block:: python
from langchain import ElasticVectorSearch
from langchain.embeddings import OpenAIEmbeddings
embeddings = OpenAIEmbeddings()
elastic_vector_search = ElasticVectorSearch.from_texts(
texts,
embeddings,
elasticsearch_url="http://localhost:9200"
)
"""
elasticsearch_url = elasticsearch_url or get_from_env(
"elasticsearch_url", "ELASTICSEARCH_URL"
)
index_name = index_name or uuid.uuid4().hex
vectorsearch = cls(elasticsearch_url, index_name, embedding, **kwargs)
vectorsearch.add_texts(
texts, metadatas=metadatas, ids=ids, refresh_indices=refresh_indices
)
return vectorsearch
def create_index(self, client: Any, index_name: str, mapping: Dict) -> None:
version_num = client.info()["version"]["number"][0]
version_num = int(version_num)
if version_num >= 8:
client.indices.create(index=index_name, mappings=mapping)
else:
client.indices.create(index=index_name, body={"mappings": mapping})
def client_search(
self, client: Any, index_name: str, script_query: Dict, size: int
) -> Any:
version_num = client.info()["version"]["number"][0]
version_num = int(version_num)
if version_num >= 8:
response = client.search(index=index_name, query=script_query, size=size)
else:
response = client.search(
index=index_name, body={"query": script_query, "size": size}
)
return response
def delete(self, ids: Optional[List[str]] = None, **kwargs: Any) -> None:
"""Delete by vector IDs.
Args:
ids: List of ids to delete.
"""
if ids is None:
raise ValueError("No ids provided to delete.")
# TODO: Check if this can be done in bulk
for id in ids:
self.client.delete(index=self.index_name, id=id)
class ElasticKnnSearch(ElasticVectorSearch):
"""
A class for performing k-Nearest Neighbors (k-NN) search on an Elasticsearch index.
The class is designed for a text search scenario where documents are text strings
and their embeddings are vector representations of those strings.
"""
def __init__(
self,
index_name: str,
embedding: Embeddings,
es_connection: Optional["Elasticsearch"] = None,
es_cloud_id: Optional[str] = None,
es_user: Optional[str] = None,
es_password: Optional[str] = None,
vector_query_field: Optional[str] = "vector",
query_field: Optional[str] = "text",
):
"""
Initializes an instance of the ElasticKnnSearch class and sets up the
Elasticsearch client.
Args:
index_name: The name of the Elasticsearch index.
embedding: An instance of the Embeddings class, used to generate vector
representations of text strings.
es_connection: An existing Elasticsearch connection.
es_cloud_id: The Cloud ID of the Elasticsearch instance. Required if
creating a new connection.
es_user: The username for the Elasticsearch instance. Required if
creating a new connection.
es_password: The password for the Elasticsearch instance. Required if
creating a new connection.
"""
try:
import elasticsearch
except ImportError:
raise ImportError(
"Could not import elasticsearch python package. "
"Please install it with `pip install elasticsearch`."
)
self.embedding = embedding
self.index_name = index_name
self.query_field = query_field
self.vector_query_field = vector_query_field
# If a pre-existing Elasticsearch connection is provided, use it.
if es_connection is not None:
self.client = es_connection
else:
# If credentials for a new Elasticsearch connection are provided,
# create a new connection.
if es_cloud_id and es_user and es_password:
self.client = elasticsearch.Elasticsearch(
cloud_id=es_cloud_id, basic_auth=(es_user, es_password)
)
else:
raise ValueError(
"""Either provide a pre-existing Elasticsearch connection, \
or valid credentials for creating a new connection."""
)
@staticmethod
def _default_knn_mapping(dims: int) -> Dict:
"""Generates a default index mapping for kNN search."""
return {
"properties": {
"text": {"type": "text"},
"vector": {
"type": "dense_vector",
"dims": dims,
"index": True,
"similarity": "dot_product",
},
}
}
def _default_knn_query(
self,
query_vector: Optional[List[float]] = None,
query: Optional[str] = None,
model_id: Optional[str] = None,
k: Optional[int] = 10,
num_candidates: Optional[int] = 10,
) -> Dict:
knn: Dict = {
"field": self.vector_query_field,
"k": k,
"num_candidates": num_candidates,
}
# Case 1: `query_vector` is provided, but not `model_id` -> use query_vector
if query_vector and not model_id:
knn["query_vector"] = query_vector
# Case 2: `query` and `model_id` are provided, -> use query_vector_builder
elif query and model_id:
knn["query_vector_builder"] = {
"text_embedding": {
"model_id": model_id, # use 'model_id' argument
"model_text": query, # use 'query' argument
}
}
else:
raise ValueError(
"Either `query_vector` or `model_id` must be provided, but not both."
)
return knn
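# Sketch of the knn body built in the raw-vector case (hypothetical vector, default field name "vector"):
#   {"field": "vector", "k": 10, "num_candidates": 10, "query_vector": [0.1, 0.2, ...]}
# In the model case, "query_vector" is omitted and a "query_vector_builder" entry is added instead:
#   {"text_embedding": {"model_id": "<deployed model id>", "model_text": "<query text>"}}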
def knn_search(
self,
query: Optional[str] = None,
k: Optional[int] = 10,
query_vector: Optional[List[float]] = None,
model_id: Optional[str] = None,
size: Optional[int] = 10,
source: Optional[bool] = True,
fields: Optional[
Union[List[Mapping[str, Any]], Tuple[Mapping[str, Any], ...], None]
] = None,
) -> Dict:
"""
Performs a k-nearest neighbor (k-NN) search on the Elasticsearch index.
The search can be conducted using either a raw query vector or a model ID.
The method first generates
the body of the search query, which can be interpreted by Elasticsearch.
It then performs the k-NN
search on the Elasticsearch index and returns the results.
Args:
query: The query or queries to be used for the search. Required if
`query_vector` is not provided.
k: The number of nearest neighbors to return. Defaults to 10.
query_vector: The query vector to be used for the search. Required if
`query` is not provided.
model_id: The ID of the model to use for generating the query vector, if
`query` is provided.
size: The number of search hits to return. Defaults to 10.
source: Whether to include the source of each hit in the results.
fields: The fields to include in the source of each hit. If None, all
fields are included.
vector_query_field: Field name to use in knn search if not default 'vector'
Returns:
The search results.
Raises:
ValueError: If neither `query_vector` nor `model_id` is provided, or if
both are provided.
"""
knn_query_body = self._default_knn_query(
query_vector=query_vector, query=query, model_id=model_id, k=k
)
# Perform the kNN search on the Elasticsearch index and return the results.
res = self.client.search(
index=self.index_name,
knn=knn_query_body,
size=size,
source=source,
fields=fields,
)
return dict(res)
def knn_hybrid_search(
self,
query: Optional[str] = None,
k: Optional[int] = 10,
query_vector: Optional[List[float]] = None,
model_id: Optional[str] = None,
size: Optional[int] = 10,
source: Optional[bool] = True,
knn_boost: Optional[float] = 0.9,
query_boost: Optional[float] = 0.1,
fields: Optional[
Union[List[Mapping[str, Any]], Tuple[Mapping[str, Any], ...], None]
] = None,
) -> Dict[Any, Any]:
"""Performs a hybrid k-nearest neighbor (k-NN) and text-based search on the
Elasticsearch index.
The search can be conducted using either a raw query vector or a model ID.
The method first generates
the body of the k-NN search query and the text-based query, which can be
interpreted by Elasticsearch.
It then performs the hybrid search on the Elasticsearch index and returns the
results.
Args:
query: The query or queries to be used for the search. Required if
`query_vector` is not provided.
k: The number of nearest neighbors to return. Defaults to 10.
query_vector: The query vector to be used for the search. Required if
`query` is not provided.
model_id: The ID of the model to use for generating the query vector, if
`query` is provided.
size: The number of search hits to return. Defaults to 10.
source: Whether to include the source of each hit in the results.
knn_boost: The boost factor for the k-NN part of the search.
query_boost: The boost factor for the text-based part of the search.
fields
The fields to include in the source of each hit. If None, all fields are
included. Defaults to None.
vector_query_field: Field name to use in knn search if not default 'vector'
query_field: Field name to use in search if not default 'text'
Returns:
The search results.
Raises:
ValueError: If neither `query_vector` nor `model_id` is provided, or if
both are provided.
"""
knn_query_body = self._default_knn_query(
query_vector=query_vector, query=query, model_id=model_id, k=k
)
# Modify the knn_query_body to add a "boost" parameter
knn_query_body["boost"] = knn_boost
# Generate the body of the standard Elasticsearch query
match_query_body = {
"match": {self.query_field: {"query": query, "boost": query_boost}}
}
# Perform the hybrid search on the Elasticsearch index and return the results.
res = self.client.search(
index=self.index_name,
query=match_query_body,
knn=knn_query_body,
fields=fields,
size=size,
source=source,
)
return dict(res)
| [] |
2024-01-10 | alvinrach/ecommerce-product-analyzer | modules.py | import pandas as pd
from scraper import Scraper
import os
import contractions
import re
import nltk
nltk.download('stopwords')
import numpy as np
from sklearn.metrics.pairwise import cosine_similarity
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.preprocessing import Normalizer
import random
import openai
class Reccom:
def __init__(self):
self.data = None
self.pure_data = None
self.similar_products = None
def load_data(self, rescraping=False):
csv_name = '20_tokopedia_products.csv'
if csv_name in os.listdir():
data = pd.read_csv(csv_name)
elif csv_name not in os.listdir() or rescraping:
a = Scraper()
data = a.get_data()
a.driver.quit()
data = pd.DataFrame(data)
data.to_csv(csv_name, index=False)
data['id'] = data.index
pure_data = data.copy()
self.data = data
self.pure_data = pure_data
def similarity(self, prod_id):
# Need to be changed to experimental fill dataframe form later
data = self.data.copy()
pure_data = self.pure_data.copy()
pure_data['category'] = ['wedding', 'wedding', 'baby', 'baby', 'wedding', 'general', 'baby', 'baby', 'baby', 'general', 'general', 'general', 'general', 'general', 'wedding', 'baby', 'wedding', 'general', 'general', 'wedding']
data = pure_data.copy()
# Cleaning the texts
def txtprocess(txt):
# Lower the texts
txt = str(txt).lower()
# Remove contractions
txt = contractions.fix(txt)
# Just pick the alphabet
txt = re.sub(r'[^a-zA-Z]', ' ', txt)
# Fix unnecessary space
txt = re.sub(' +', ' ', txt)
txt = ' '.join(txt.split())
return txt
data.Product = data.Product.map(txtprocess)
data.Description = data.Description.map(txtprocess)
# Cleaning stopwords
stop_words = set(nltk.corpus.stopwords.words('indonesian'))
stop_words.add('gift')
stop_words.add('hampers')
stop_words.add('hadiah')
stop_words.add('kado')
stop_words.add('x')
def remove_stopwords(txt):
no_stopword_txt = [w for w in txt.split() if not w in stop_words]
return ' '.join(no_stopword_txt)
data.Product = data.Product.map(remove_stopwords)
data.Description = data.Description.map(remove_stopwords)
# Feature Engineering
data['char_count'] = data['Description'].map(len)
data.Price = data.Price.str[2:].str.replace('.', '').astype(int)
def get_similarity_matrix(
weight_product = 0.4,
weight_description = 0.3,
weight_prices = 0.2,
weight_char_count = 0.1
):
# For product and description
tfidf_product = TfidfVectorizer()
product_vectors = tfidf_product.fit_transform(data.Product).toarray()
tfidf_description = TfidfVectorizer()
description_vectors = tfidf_description.fit_transform(data.Description).toarray()
product_similarity_matrix = cosine_similarity(product_vectors)
description_similarity_matrix = cosine_similarity(description_vectors)
# For prices and char count
normalized_prices = data.Price.values.reshape(1, -1)
normalized_char_count = data.char_count.values.reshape(1, -1)
scaler = Normalizer()
normalized_prices = scaler.fit_transform(normalized_prices)
normalized_char_count = scaler.fit_transform(normalized_char_count)
normalized_prices = cosine_similarity(normalized_prices)
normalized_char_count = cosine_similarity(normalized_char_count)
# Combined Similarity with weights
combined_similarity_matrix = (weight_product * product_similarity_matrix) + (weight_description * description_similarity_matrix) + (weight_prices * normalized_prices) + (weight_char_count * normalized_char_count)
return combined_similarity_matrix
combined_similarity_matrix = get_similarity_matrix(
weight_product = 0.4,
weight_description = 0.3,
weight_prices = 0.2,
weight_char_count = 0.1
)
def result(prod_id):
i = combined_similarity_matrix[prod_id]
a = i.argsort()[::-1][1:4]
b = sorted(i)[::-1][1:4]
recs = []
for j,k in zip(a,b):
rec = {}
rec['id'] = j
rec['sim_score'] = k
rec['relevant'] = pure_data.category[j] == pure_data.category[prod_id]
recs.append(rec)
return recs
data['result'] = data.id.map(result)
def three_similar_product(prod_id):
prods = []
for i in data.result[prod_id]:
prods.append(i['id'])
return prods
self.similar_products = three_similar_product(prod_id)
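# Rough usage sketch (illustrative only; assumes the scraped CSV or the scraper is available):
#   rec = Reccom()
#   rec.load_data()
#   rec.similarity(prod_id=3)
#   rec.similar_products  # -> ids of the three products most similar to product 3,
#                         #    ranked by the weighted name/description/price/length similarity.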
def improve(self, key, prod_id):
pure_data = self.pure_data.copy()
message = 'perbaiki deskripsi produk berikut sehingga menarik bagi pembeli namun mempunyai informasi yang padat \n\n'
message = message+pure_data.Description[prod_id]
openai.api_key = key
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[{'role':'user', 'content':f'{message}'}],
temperature=0,
max_tokens=1024
)
return response['choices'][0]['message']['content'] | [
"PLACEHOLDER"
] |
2024-01-10 | Kurrawong/fair-ease-matcher | src~currently_unused~odv.py | import json
import os
from pathlib import Path
from langchain import PromptTemplate, OpenAI
from src.sparql_queries import find_vocabs_sparql, get_vocabs_from_sparql_endpoint
def read_file(file_path: Path):
return file_path.read_text()[:10000]
def create_odv_prompt(odv_text):
template = """\
The following data is the first 10000 characters from an Ocean Data View file.
There may be comment lines at the beginning of the file, which start with //.
I am interested in, for "value" columns:
1. Vocabularies/concepts used for the columns, these may be specified in columns with a URN, or they may not be specified at all.
2. Units/concepts for the same columns. These may also be specified with a URN, or not at all, or in the column heading itself or both.
I am not interested in "Local" URNs. These are of the form "SDN:LOCAL:ABCD". These are only used to map from the comments to the column labels in the data itself.
I am interested in non "Local" URNs. These are of the form "SDN:P01::ABCDEFG" These refer to external vocabularies.
I am also interested in Instrument and Observation Platform information if available.
If a column name is repeated multiple times, it's probably not actually a column - please ignore it.
Please extract this information based on the columns as JSON in the format below. For each column_name if an
attribute ("column_vocabulary_text", "column_vocabulary_urn", "column_unit_text", "column_unit_urn", "instrument",
"observation_platform") has information, include that attribute in the response, otherwise do not include it for
that column_name. "column_unit_text" is typically included in square brackets for example "[milligram/m3]".
{{
"columns": [
{{"column_name" :
{{
"column_vocabulary_text": "col vocab text",
"column_vocabulary_urn": "col vocab urn",
"column_unit_text": "col unit text",
"column_unit_urn": "col unit urn",
"instrument": "instrument text",
"observation_platform": "observation platform text"
}}
}}
]
}}
This is the first 10000 characters: {odv_text}
"""
prompt = PromptTemplate.from_template(template)
return prompt.format(odv_text=odv_text)
def get_urns_from_odv(odv_json):
# load json data
data = json.loads(odv_json)
# lists to store the vocabulary and unit URNs
vocab_urns = []
unit_urns = []
# go through the columns
for column in data["columns"]:
for field in column:
if "column_vocabulary_urn" in column[field]:
vocab_urns.append(column[field]["column_vocabulary_urn"])
if "column_unit_urn" in column[field]:
unit_urns.append(column[field]["column_unit_urn"])
if not vocab_urns and not unit_urns:
raise ValueError("No vocabulary or Unit URNs found")
return vocab_urns, unit_urns
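# Minimal sketch of the expected input/output (URNs below are illustrative, not from a real ODV file):
#   odv_json = '{"columns": [{"Chlorophyll": {"column_vocabulary_urn": "SDN:P01::CPHLPR01",
#                                              "column_unit_urn": "SDN:P06::UMMC"}}]}'
#   get_urns_from_odv(odv_json)  # -> (["SDN:P01::CPHLPR01"], ["SDN:P06::UMMC"])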
def main():
odv_text = read_file(Path("../../data/000545_ODV_77AR2009_00095_H09_V0.txt"))
prompt = create_odv_prompt(odv_text)
llm = OpenAI(model_name="gpt-3.5-turbo-0613")
if os.getenv("TEST_MODE") == "true":
output = read_file(Path("../../tests/data/odv_response.json"))
else:
output = llm(prompt)
try:
variable_urns, unit_urns = get_urns_from_odv(output)
vocab_query = find_vocabs_sparql(variable_urns)
unit_query = find_vocabs_sparql(unit_urns)
collections_uris = get_vocabs_from_sparql_endpoint(vocab_query)
unit_collections_uris = get_vocabs_from_sparql_endpoint(unit_query)
print(collections_uris, unit_collections_uris)
except ValueError as e:
# try next option
pass
if __name__ == "__main__":
main()
| [
"instrument text",
"SDN:LOCAL:ABCD",
"column_vocabulary_urn",
"column_name",
"[milligram/m3]",
"column_vocabulary_text",
"instrument",
"col vocab urn",
"observation platform text",
"col vocab text",
"SDN:P01::ABCDEFG",
" The following data is the first 10000 characters from an Ocean Data View file.\n There may be comment lines at the beginning of the file, which start with //.\n I am interested in, for \"value\" columns:\n 1. Vocabularies/concepts used for the columns, these may be specified in columns with a URN, or they may not be specified at all.\n 2. Units/concepts for the same columns. These may also be specified with a URN, or not at all, or in the column heading itself or both.\n I am not interested in \"Local\" URNs. These are of the form \"SDN:LOCAL:ABCD\". These are only used to map from the comments to the column labels in the data itself.\n I am interested in non \"Local\" URNs. These are of the form \"SDN:P01::ABCDEFG\" These refer to external vocabularies.\n I am also interested in Instrument and Observation Platform information if available.\n If a column name is repeated multiple times, it's probably not actually a column - please ignore it.\n Please extract this information based on the columns as JSON in the format below. For each column_name if an \n attribute (\"column_vocabulary_text\", \"column_vocabulary_urn\", \"column_unit_text\", \"column_unit_urn\", \"instrument\", \n \"observation_platform\") has information, include that attribute in the response, otherwise do not include it for \n that column_name. \"column_unit_text\" is typically included in square brackets for example \"[milligram/m3]\".\n \n {{\n \"columns\": [\n {{\"column_name\" : \n {{\n \"column_vocabulary_text\": \"col vocab text\",\n \"column_vocabulary_urn\": \"col vocab urn\",\n \"column_unit_text\": \"col unit text\",\n \"column_unit_urn\": \"col unit urn\",\n \"instrument\": \"instrument text\",\n \"observation_platform\": \"observation platform text\"\n }}\n }}\n ]\n }}\n This is the first 10000 characters: {odv_text}\n ",
"column_unit_text",
"observation_platform",
"columns",
"col unit text",
"col unit urn",
"column_unit_urn"
] |
2024-01-10 | Abdulrhman-Alghamdi7/Astronomers-AI-Assistant | desk~AstroAiAssistant.py | import tkinter as tk
from datetime import datetime
import csv
import openai
from skyfield.api import load, Topos, utc
from datetime import datetime
from geopy.geocoders import Nominatim
from geopy.exc import GeocoderTimedOut
from dotenv import load_dotenv
import os
def configure():
load_dotenv()
configure()
api_key = os.getenv('api_key')
data = []
with open('/Users/abdulrhmanalghamdi/Library/Mobile Documents/com~apple~CloudDocs/programming💻/AstronomersAiAssistant/astronomicalEvents.csv', 'r') as csvfile:
csvreader = csv.reader(csvfile)
for row in csvreader:
data.append(row)
def askfordistance(a, model="gpt-3.5-turbo", max_tokens=10):
try:
response = openai.ChatCompletion.create(
model=model,
messages=[
{"role": "system", "content": "You are a chatbot."},
{"role": "user", "content": f"give me just the distance from earth to {a} in 10 characters max"}
],
max_tokens=max_tokens,
api_key=api_key
)
if response.choices:
return response.choices[0].message["content"].strip()
else:
return "No answer provided."
except Exception as e:
return f"An error occurred: {str(e)}"
def askforinfo(a, model="gpt-3.5-turbo", max_tokens=100):
try:
response = openai.ChatCompletion.create(
model=model,
messages=[
{"role": "system", "content": "You are a chatbot."},
{"role": "user", "content": f"give me a summary about {a} in 100 characters max"}
],
max_tokens=max_tokens,
api_key=api_key
)
if response.choices:
return response.choices[0].message["content"].strip()
else:
return "No answer provided."
except Exception as e:
return f"An error occurred: {str(e)}"
r = []
def get_user_location():
try:
geolocator = Nominatim(user_agent="get_user_location")
location = geolocator.geocode("")
if location:
return location.latitude, location.longitude
else:
r.append("\nUnable to determine your location. Using default location(0,0).")
default_latitude = 0.0
default_longitude = 0.0
return default_latitude, default_longitude
except GeocoderTimedOut:
r.append("\nGeocoding service timed out. Unable to determine your location. Using default location(0,0).")
default_latitude = 0.0
default_longitude = 0.0
return default_latitude, default_longitude
def get_celestial_body_info(body_name):
planets = load('de421.bsp')
object = planets[body_name]
observer_location = get_user_location()
if observer_location is not None:
observer_latitude, observer_longitude = observer_location
ts = load.timescale()
time = ts.now()
observer = Topos(observer_latitude, observer_longitude)
observer_position = observer.at(time)
object_position = object.at(time)
separation = object_position.separation_from(observer_position)
r.append(f'\nRight Ascension: {object_position.radec()[0].hours}\nDeclination: {object_position.radec()[1].degrees}\nseparation: {separation.degrees}')
return f"Name: {body_name}\nAbout: {askforinfo(body_name)} {''.join(r)}"
else:
return None
def get_events_in_date_range(start_date, end_date):
events_in_range = []
start_date = datetime.strptime(start_date, '%Y/%m/%d')
end_date = datetime.strptime(end_date, '%Y/%m/%d')
for event in data[1:]: # Skip the header row
event_start_date = datetime.strptime(event[2].strip(), '%Y/%m/%d')
event_end_date = datetime.strptime(event[3].strip(), '%Y/%m/%d')
if start_date <= event_end_date and end_date >= event_start_date:
events_in_range.append(event[1:])
s = ''
for i in events_in_range:
s += f'Event name: {i[0]}\nStart date: {i[1]}\nEnd date: {i[2]}\nEvent description: {i[3]}\n\n'
return s
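# Illustrative call (dates are hypothetical; events come from astronomicalEvents.csv):
#   get_events_in_date_range("2024/01/01", "2024/01/31")
# returns a formatted block listing each overlapping event's name, dates and description.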
def ask_gpt3_5_chat(question, model="gpt-3.5-turbo", max_tokens=500):
try:
response = openai.ChatCompletion.create(
model=model,
messages=[
{"role": "system", "content": "You are a chatbot."},
{"role": "user", "content": question}
],
max_tokens=max_tokens,
api_key=api_key
)
if response.choices:
return response.choices[0].message["content"].strip()
else:
return "No answer provided."
except Exception as e:
return f"An error occurred: {str(e)}"
def chatelem():
oblabel.pack_forget()
objetb.pack_forget()
sendbutobj.pack_forget()
inflabel.pack_forget()
infrep.pack_forget()
evlabel.pack_forget()
sdlabel.pack_forget()
esdtb.pack_forget()
enlabel.pack_forget()
endtb.pack_forget()
sendbuteve.pack_forget()
infelabel.pack_forget()
inferep.pack_forget()
chlabel.pack()
chattb.pack()
sendbutt.pack()
replabel.pack()
chatrep.pack()
def eventselem():
chattb.pack_forget()
chatrep.pack_forget()
sendbutt.pack_forget()
chlabel.pack_forget()
replabel.pack_forget()
oblabel.pack_forget()
objetb.pack_forget()
sendbutobj.pack_forget()
inflabel.pack_forget()
infrep.pack_forget()
evlabel.pack()
sdlabel.pack()
esdtb.pack()
enlabel.pack()
endtb.pack()
sendbuteve.pack()
infelabel.pack()
inferep.pack()
def objectelem():
chattb.pack_forget()
chatrep.pack_forget()
sendbutt.pack_forget()
chlabel.pack_forget()
replabel.pack_forget()
evlabel.pack_forget()
sdlabel.pack_forget()
esdtb.pack_forget()
enlabel.pack_forget()
endtb.pack_forget()
sendbuteve.pack_forget()
infelabel.pack_forget()
inferep.pack_forget()
oblabel.pack()
objetb.pack()
sendbutobj.pack()
inflabel.pack()
infrep.pack()
def sendai():
q = chattb.get("1.0", "end-1c")
prompt = f"consider that you are an astronomer who can answer any astronomical or scientific question, but you cannot answer any non-astronomical or scientific question. You answer with (this is outside the scope of my knowledge). Based on the previous information, answer the following question: {q} within 500 characters."
chatrep.delete("1.0", "end-1c")
a = ask_gpt3_5_chat(prompt)
chatrep.insert("end", a)
chattb.delete("1.0", "end-1c")
def srhevnt():
sd = esdtb.get("1.0", "end-1c")
ed = endtb.get("1.0", "end-1c")
inferep.delete("1.0", "end-1c")
a = get_events_in_date_range(sd,ed)
inferep.insert("end", a)
esdtb.delete("1.0", "end-1c")
endtb.delete("1.0", "end-1c")
def srhobj():
obj = objetb.get("1.0","end-1c")
a = get_celestial_body_info(obj)
infrep.delete("1.0", "end-1c")
infrep.insert("end", a)
objetb.delete("1.0", "end-1c")
root = tk.Tk()
root.geometry("800x800")
root.title('Astronomers AI Assistant')
#chat
chlabel = tk.Label(root, text="Enter your Question")
chattb = tk.Text(root, height=2, width=100)
sendbutt = tk.Button(root, text="Send",command=sendai)
replabel = tk.Label(root, text="Replay")
chatrep = tk.Text(root, width=100, height=40)
#objects
oblabel = tk.Label(root, text="Enter objects scientific name")
objetb = tk.Text(root, height=2, width=100)
sendbutobj = tk.Button(root, text="Search",command=srhobj)
inflabel = tk.Label(root, text="Object info")
infrep = tk.Text(root, width=100, height=40)
#events
evlabel = tk.Label(root, text="Search for Astronomical events")
sdlabel = tk.Label(root, text="Start date")
esdtb = tk.Text(root,height=2, width=20)
enlabel = tk.Label(root, text="End date")
endtb = tk.Text(root,height=2, width=20)
sendbuteve = tk.Button(root, text="Search",command=srhevnt)
infelabel = tk.Label(root, text="Event info")
inferep = tk.Text(root, width=100, height=35)
label1 = tk.Label(root, text="Welcome to the Astronomers AI Assistant")
label1.pack()
button_chat = tk.Button(root, text="Chat with AI", command=chatelem)
button_chat.pack()
button_event = tk.Button(root, text="Saerch for Event", command=eventselem)
button_event.pack()
button_object = tk.Button(root, text="Search for Object", command=objectelem)
button_object.pack()
root.mainloop()
| [
"give me just the distance from earth to PLACEHOLDER in 10 characters max",
"consider that you are an astronomer who can answer any astronomical or scientific question, but you cannot answer any non-astronomical or scientific question. You answer with (this is outside the scope of my knowledge). Based on the previous information, answer the following question: PLACEHOLDER within 500 characters.",
"give me a summary about PLACEHOLDER in 100 characters max",
"You are a chatbot."
] |
2024-01-10 | InternLM/tutorial | langchain~demo~create_db.py | # 首先导入所需第三方库
from langchain.document_loaders import UnstructuredFileLoader
from langchain.document_loaders import UnstructuredMarkdownLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.vectorstores import Chroma
from langchain.embeddings.huggingface import HuggingFaceEmbeddings
from tqdm import tqdm
import os
# 获取文件路径函数
def get_files(dir_path):
# args:dir_path,目标文件夹路径
file_list = []
for filepath, dirnames, filenames in os.walk(dir_path):
# os.walk 函数将递归遍历指定文件夹
for filename in filenames:
# 通过后缀名判断文件类型是否满足要求
if filename.endswith(".md"):
# 如果满足要求,将其绝对路径加入到结果列表
file_list.append(os.path.join(filepath, filename))
elif filename.endswith(".txt"):
file_list.append(os.path.join(filepath, filename))
return file_list
# 加载文件函数
def get_text(dir_path):
# args:dir_path,目标文件夹路径
# 首先调用上文定义的函数得到目标文件路径列表
file_lst = get_files(dir_path)
# docs 存放加载之后的纯文本对象
docs = []
# 遍历所有目标文件
for one_file in tqdm(file_lst):
file_type = one_file.split('.')[-1]
if file_type == 'md':
loader = UnstructuredMarkdownLoader(one_file)
elif file_type == 'txt':
loader = UnstructuredFileLoader(one_file)
else:
# 如果是不符合条件的文件,直接跳过
continue
docs.extend(loader.load())
return docs
# 目标文件夹
tar_dir = [
"/root/data/InternLM",
"/root/data/InternLM-XComposer",
"/root/data/lagent",
"/root/data/lmdeploy",
"/root/data/opencompass",
"/root/data/xtuner"
]
# 加载目标文件
docs = []
for dir_path in tar_dir:
docs.extend(get_text(dir_path))
# 对文本进行分块
text_splitter = RecursiveCharacterTextSplitter(
chunk_size=500, chunk_overlap=150)
split_docs = text_splitter.split_documents(docs[:10])
# 加载开源词向量模型
embeddings = HuggingFaceEmbeddings(model_name="/root/data/model/sentence-transformer")
# 构建向量数据库
# 定义持久化路径
persist_directory = 'data_base/vector_db/chroma'
# 加载数据库
vectordb = Chroma.from_documents(
documents=split_docs,
embedding=embeddings,
persist_directory=persist_directory # 允许我们将persist_directory目录保存到磁盘上
)
# 将加载的向量数据库持久化到磁盘上
vectordb.persist()
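# A minimal follow-up sketch (assumes the same embedding model and persist_directory as above):
#   vectordb = Chroma(persist_directory=persist_directory, embedding_function=embeddings)
#   vectordb.similarity_search("什么是 XTuner?", k=3)   # reload the persisted store and query it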
| [] |
2024-01-10 | InternLM/tutorial | langchain~demo~run_gradio.py | # 导入必要的库
import gradio as gr
from langchain.vectorstores import Chroma
from langchain.embeddings.huggingface import HuggingFaceEmbeddings
import os
from LLM import InternLM_LLM
from langchain.prompts import PromptTemplate
def load_chain():
# 加载问答链
# 定义 Embeddings
embeddings = HuggingFaceEmbeddings(model_name="/root/data/model/sentence-transformer")
# 向量数据库持久化路径
persist_directory = 'data_base/vector_db/chroma'
# 加载数据库
vectordb = Chroma(
persist_directory=persist_directory, # 允许我们将persist_directory目录保存到磁盘上
embedding_function=embeddings
)
llm = InternLM_LLM(model_path = "/root/data/model/Shanghai_AI_Laboratory/internlm-chat-7b")
template = """使用以下上下文来回答最后的问题。如果你不知道答案,就说你不知道,不要试图编造答
案。尽量使答案简明扼要。总是在回答的最后说“谢谢你的提问!”。
{context}
问题: {question}
有用的回答:"""
QA_CHAIN_PROMPT = PromptTemplate(input_variables=["context","question"],
template=template)
# 运行 chain
from langchain.chains import RetrievalQA
qa_chain = RetrievalQA.from_chain_type(llm,
retriever=vectordb.as_retriever(),
return_source_documents=True,
chain_type_kwargs={"prompt":QA_CHAIN_PROMPT})
return qa_chain
class Model_center():
"""
存储问答 Chain 的对象
"""
def __init__(self):
self.chain = load_chain()
def qa_chain_self_answer(self, question: str, chat_history: list = []):
"""
调用不带历史记录的问答链进行回答
"""
if question == None or len(question) < 1:
return "", chat_history
try:
chat_history.append(
(question, self.chain({"query": question})["result"]))
return "", chat_history
except Exception as e:
return e, chat_history
model_center = Model_center()
block = gr.Blocks()
with block as demo:
with gr.Row(equal_height=True):
with gr.Column(scale=15):
gr.Markdown("""<h1><center>InternLM</center></h1>
<center>书生浦语</center>
""")
# gr.Image(value=LOGO_PATH, scale=1, min_width=10,show_label=False, show_download_button=False)
with gr.Row():
with gr.Column(scale=4):
chatbot = gr.Chatbot(height=450, show_copy_button=True)
# 创建一个文本框组件,用于输入 prompt。
msg = gr.Textbox(label="Prompt/问题")
with gr.Row():
# 创建提交按钮。
db_wo_his_btn = gr.Button("Chat")
with gr.Row():
# 创建一个清除按钮,用于清除聊天机器人组件的内容。
clear = gr.ClearButton(
components=[chatbot], value="Clear console")
# 设置按钮的点击事件。当点击时,调用上面定义的 qa_chain_self_answer 函数,并传入用户的消息和聊天历史记录,然后更新文本框和聊天机器人组件。
db_wo_his_btn.click(model_center.qa_chain_self_answer, inputs=[
msg, chatbot], outputs=[msg, chatbot])
gr.Markdown("""提醒:<br>
1. 初始化数据库时间可能较长,请耐心等待。
2. 使用中如果出现异常,将会在文本输入框进行展示,请不要惊慌。 <br>
""")
# threads to consume the request
gr.close_all()
# 启动新的 Gradio 应用,设置分享功能为 True,并使用环境变量 PORT1 指定服务器端口。
# demo.launch(share=True, server_port=int(os.environ['PORT1']))
# 直接启动
demo.launch()
| [
"使用以下上下文来回答最后的问题。如果你不知道答案,就说你不知道,不要试图编造答\n 案。尽量使答案简明扼要。总是在回答的最后说“谢谢你的提问!”。\n {context}\n 问题: {question}\n 有用的回答:",
"question",
"context"
] |
2024-01-10 | project-typebuild/typebuild | typebuild~llm_functions.py | import streamlit as st
import openai
import os
import re
import time
from tenacity import (
retry,
stop_after_attempt,
wait_random_exponential,
)
from plugins.llms import get_llm_output
#----------FUNCTIONS TO GENERATE PROMPTS----------------
def col_names_and_types(df):
"""Given a df, it returns a string with the column names and types"""
col_names = df.columns
col_types = df.dtypes
col_names_and_types = []
for col_name, col_type in zip(col_names, col_types):
col_names_and_types.append(f"{col_name}: {col_type}")
col_names = '\n- '.join(col_names_and_types)
buf = f"""The Dataframe has the following columns:
- {col_names}"""
return buf
def get_sample_data_as_markdown(df):
"""
Gets a sample of 5 lines, if there are at least 3 lines.
Else, gets the whole thing.
"""
if len(df) > 5:
buf = df.head(5).to_markdown(index=False)
else:
buf = df.to_markdown(index=False)
return buf
def get_function_prompt(df, default=None):
"""Returns a prompt for GPT-3"""
prompt_for_table = ''
if st.sidebar.checkbox(f"Show sample data", value=False):
st.dataframe(df.sample(5))
prompt_for_table += col_names_and_types(df) + '\n'
buf = get_sample_data_as_markdown(df)
prompt_for_table += f"\nHERE IS SOME SAMPLE DATA:\n{buf}\n"
if not default:
default = ""
label = 'I want to'
if 'i_want_to' in st.session_state:
if st.session_state.i_want_to:
label = st.session_state.i_want_to
i_want_to = st.text_area(label, value=default)
# Check if there is a the_func in the session state
# with the name my_func. If yes, add this as context.
func_prompt = ''
if 'the_func' in st.session_state:
if 'my_func' in st.session_state.the_func:
func_prompt = f"FYI: This is the function I am trying to modify: {st.session_state.the_func}"
prompt = f"""
I am working on a dataframe named df.
{prompt_for_table}
{func_prompt}
I WANT TO: {i_want_to}
"""
return prompt
def clean_function(the_func):
"""
Cleans the function in a number of ways.
"""
# Remove the first line if just mentions the language
if the_func.startswith('python'):
the_func = '\n'.join(the_func.split('\n')[1:])
# Remove import statements
revised_func = ''
for line in the_func.split('\n'):
if line.startswith('import'):
pass
# If it's a function def, add it
elif line.startswith('def'):
revised_func += line + '\n'
# If it's calling my_func, pass
elif 'my_func' in line:
pass
else:
revised_func += line + '\n'
st.session_state.the_func = revised_func
return revised_func
def parse_code_or_requirements_from_response(response):
"""
The LLM can return code or requirements in the content.
Ideally, requirements come in triple pipe delimiters,
but sometimes they come in triple backticks.
Figure out which one it is and return the extracted code or requirements.
"""
# If there are ```, it could be code or requirements
code_or_requirement = None
if '```' in response:
# If it's python code, it should have at least one function in it
if 'def ' in response:
extracted = parse_code_from_response(response)
code_or_requirement = 'code'
# If it's not python code, it's probably requirements
else:
extracted = parse_modified_user_requirements_from_response(response)
code_or_requirement = 'requirements'
# If there are |||, it's probably requirements
elif '|||' in response:
extracted = parse_modified_user_requirements_from_response(response)
code_or_requirement = 'requirements'
else:
extracted = None
return extracted, code_or_requirement
def parse_code_from_response(response):
"""
Returns the code from the response from LLM.
In the prompt to code, we have asked the LLM to return the code inside triple backticks.
Args:
- response (str): The response from LLM
Returns:
- matches (list): A list of strings with the code
"""
pattern = r"```python([\s\S]*?)```"
matches = re.findall(pattern, response)
if len(matches) > 0:
matches = '\n'.join(matches)
else:
matches = matches[0]
return matches
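# Illustrative only: for a response such as
#   "Here you go:\n```python\ndef my_func(df):\n    return df\n```"
# the regex captures everything between the ```python fences, so the function returns
# "\ndef my_func(df):\n    return df\n".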
def parse_modified_user_requirements_from_response(response):
"""
Returns the modified user requirements from the response from LLM.
In the prompt to modify, we have asked the LLM to return the modified user requirements inside triple pipe delimiters.
Args:
- response (str): The response from LLM
Returns:
- matches (list): A list of strings with the modified user requirements
"""
if '|||' in response:
pattern = r"\|\|\|([\s\S]*?)\|\|\|"
if '```' in response:
# It shouldn't have ```python in it
pattern = r"```([\s\S]*?)```"
matches = re.findall(pattern, response)
# if there are multiple matches, join by new line
if len(matches) > 0:
matches = '\n'.join(matches)
else:
# Nothing matched; return an empty string instead of indexing an empty list
matches = ''
return matches
| [
"\n",
"\nHERE IS SOME SAMPLE DATA:\nPLACEHOLDER\n",
"\n I am working on a dataframe named df. \n\n PLACEHOLDER\n\n PLACEHOLDER\n \n I WANT TO: PLACEHOLDER\n\n "
] |
2024-01-10 | project-typebuild/typebuild | typebuild~plugins~llms.py | import os
import re
import streamlit as st
import openai
from anthropic import Anthropic, HUMAN_PROMPT, AI_PROMPT
from tenacity import (
retry,
stop_after_attempt,
wait_random_exponential,
)
dir_path = os.path.dirname(os.path.realpath(__file__))
import sys
sys.path.append(st.session_state.typebuild_root)
def last_few_messages(messages):
"""
Long messages get rejected by the LLM. So,
- Will keep the system message, which is the first message
- Will keep the last 7 user and assistant messages
"""
last_messages = []
if messages:
# Get the first message
last_messages.append(messages[0])
# Get the last 3 user or assistant messages
user_assistant_messages = [i for i in messages if i['role'] in ['user', 'assistant']]
last_messages.extend(user_assistant_messages[-7:])
return last_messages
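# Sketch of the trimming behaviour (message contents are made up): the first
# (system) message is always kept, plus at most the 7 most recent
# user/assistant turns.
#   msgs = [{"role": "system", "content": "sys"}]
#   msgs += [{"role": "user", "content": f"m{i}"} for i in range(10)]
#   last_few_messages(msgs)
#   # -> 8 messages: the system message followed by m3 ... m9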
def extract_message_to_agent(content):
"""
Extracts the message to the agent from the content.
This is found within <<< and >>>.
There will be at least one set of triple angle brackets
when this function is invoked.
"""
pattern = r"<<<([\s\S]*?)>>>"
matches = re.findall(pattern, content)
if len(matches) == 1:
message_to_agent = matches[0].strip()
else:
message_to_agent = '\n'.join(matches)
# Add it to the session state
st.session_state.message_to_agent = message_to_agent
return message_to_agent
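# For illustration (hypothetical content; the function also stores the result in
# st.session_state, so it expects a Streamlit session):
#   content = "Passing this on. <<<load the sales table and plot revenue>>>"
#   extract_message_to_agent(content)
#   # -> "load the sales table and plot revenue"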
def get_llm_output(messages, max_tokens=2500, temperature=0.4, model='gpt-4', functions=[]):
"""
This checks if there is a custom_llm.py in the plugins directory
If there is, it uses that.
If not, it uses the openai llm.
"""
# Check if there is a custom_llm.py in the plugins directory
# If there is, use that
# Get just the last few messages
messages = last_few_messages(messages)
st.session_state.last_request = messages
typebuild_root = st.session_state.typebuild_root
if os.path.exists(os.path.join(typebuild_root, 'custom_llm.py')):
from custom_llm import custom_llm_output
content = custom_llm_output(messages, max_tokens=max_tokens, temperature=temperature, model=model, functions=functions)
# If claude is requested and available, use claude
elif model == 'claude-2' and 'claude_key' in st.session_state:
content = get_claude_response(messages, max_tokens=max_tokens)
else:
model = 'gpt-3.5-turbo'
msg = get_openai_output(messages, max_tokens=max_tokens, temperature=temperature, model=model, functions=functions)
content = msg.get('content', None)
if 'function_call' in msg:
func_call = msg.get('function_call', None)
st.session_state.last_function_call = func_call
st.sidebar.info("Got a function call from LLM")
content = func_call.get('content', None)
# progress_status.info("Extracting information from response...")
if content:
st.session_state.last_response = content
# We can get back code or requirements in multiple forms
# Look for each form and extract the code or requirements
# Recent GPT models return function_call as a separate json object
# Look for that first.
# If there are triple backticks, we expect code
if '```' in str(content) or '|||' in str(content):
# NOTE: THERE IS AN ASSUMPTION THAT WE CAN'T GET BOTH CODE AND REQUIREMENTS
extracted, function_name = parse_func_call_info(content)
func_call = {'name': function_name, 'arguments': {'content':extracted}}
st.session_state.last_function_call = func_call
# Stop ask llm
st.session_state.ask_llm = False
# progress_status.success('Response generated!')
return content
def get_openai_output(messages, max_tokens=3000, temperature=0.4, model='gpt-4', functions=[]):
"""
Gets the output from GPT models. default is gpt-4.
Args:
- messages (list): A list of messages in the format
messages =[
{"role": "system", "content": system_instruction},
{"role": "user", "content": prompt}],
system_instruction is the instruction given to the system to generate the response using the prompt.
- model (str): The model to use. Default is gpt-4.
- max_tokens (int): The maximum number of tokens to generate, default 800
- temperature (float): The temperature for the model. The higher the temperature, the more random the output
"""
if functions:
response = openai.ChatCompletion.create(
model="gpt-4-0613",
messages = messages,
max_tokens=max_tokens,
temperature=temperature,
n=1,
functions=functions,
)
else:
response = openai.ChatCompletion.create(
model=model,
messages = messages,
max_tokens=max_tokens,
temperature=temperature,
n=1,
)
msg = response.choices[0].message
# Stop ask llm
st.session_state.ask_llm = False
return msg
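# A minimal sketch of the `functions` argument, following the OpenAI
# function-calling schema. The schema below is an illustrative placeholder, not
# one the app defines here:
#   example_functions = [{
#       "name": "save_code_to_file",
#       "description": "Persist generated python code",
#       "parameters": {
#           "type": "object",
#           "properties": {"content": {"type": "string"}},
#           "required": ["content"],
#       },
#   }]
#   # get_openai_output(messages, functions=example_functions)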
def get_claude_response(messages, max_tokens=2000):
anthropic = Anthropic(
api_key=st.session_state.claude_key,
)
# Since claude has a higher max_tokens, let's increase the limit
max_tokens = int(max_tokens * 2)
prompt = ""
for i in messages:
if i['role'] == 'assistant':
prompt += f"{AI_PROMPT} {i['content']}\n\n"
else:
prompt += f"{HUMAN_PROMPT} {i['content']}\n\n"
prompt += AI_PROMPT
response = anthropic.completions.create(
prompt=prompt,
stop_sequences = [anthropic.HUMAN_PROMPT],
model="claude-2",
temperature=0.4,
max_tokens_to_sample=max_tokens,
)
return response.completion
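# Sketch of the prompt string this builds for a two-turn history (the
# HUMAN_PROMPT/AI_PROMPT constants come from the anthropic SDK; the message text
# is made up):
#   messages = [{"role": "user", "content": "hi"},
#               {"role": "assistant", "content": "hello"}]
#   # prompt becomes: f"{HUMAN_PROMPT} hi\n\n{AI_PROMPT} hello\n\n{AI_PROMPT}"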
def parse_func_call_info(response):
"""
The LLM can return code or requirements in the content.
Ideally, requirements come in triple pipe delimiters,
but sometimes they come in triple backticks.
Figure out which one it is and return the extracted code or requirements.
"""
# If there are ```, it could be code or requirements
function_name = None
if '```' in response:
# If it's python code, it should have at least one function in it
if 'def ' in response:
extracted = parse_code_from_response(response)
function_name = 'save_code_to_file'
elif 'data_agent' in response:
extracted = parse_modified_user_requirements_from_response(response)
function_name = 'data_agent'
# If it's not python code, it's probably requirements
else:
extracted = parse_modified_user_requirements_from_response(response)
function_name = 'save_requirements_to_file'
# If there are |||, it's probably requirements
elif '|||' in response:
extracted = parse_modified_user_requirements_from_response(response)
function_name = 'save_requirements_to_file'
else:
extracted = None
return extracted, function_name
def parse_code_from_response(response):
"""
Returns the code from the response from LLM.
In the prompt to code, we have asked the LLM to return the code inside triple backticks.
Args:
- response (str): The response from LLM
Returns:
- matches (list): A list of strings with the code
"""
pattern = r"```python([\s\S]*?)```"
matches = re.findall(pattern, response)
if len(matches) > 0:
matches = '\n'.join(matches)
else:
# No fenced python block found; return an empty string instead of indexing an empty list
matches = ''
return matches
def parse_modified_user_requirements_from_response(response):
"""
Returns the modified user requirements from the response from LLM.
In the prompt to modify, we have asked the LLM to return the modified user requirements inside triple pipe delimiters.
Args:
- response (str): The response from LLM
Returns:
- matches (list): A list of strings with the modified user requirements
"""
if '|||' in response:
pattern = r"\|\|\|([\s\S]*?)\|\|\|"
if '```' in response:
# It shouldn't have ```python in it
pattern = r"```([\s\S]*?)```"
matches = re.findall(pattern, response)
# if there are multiple matches, join by new line
if len(matches) > 0:
matches = '\n'.join(matches)
else:
# Nothing matched; return an empty string instead of indexing an empty list
matches = ''
return matches
| [
"PLACEHOLDER PLACEHOLDER\n\n"
] |
2024-01-10 | project-typebuild/typebuild | typebuild~project_management.py | # NOTE: THIS FILE HAS BEEN MIGRATED
"""
Create projects, upload files, fetch file names in the project
and other aspects of understanding and managing assets in the project folder
"""
import openai
from glob import glob
import os
import time
from helpers import text_areas
from plugins.llms import get_llm_output
from documents_management import create_document_chunk_df
from menu import reset_menu
from project_management_data import get_column_info, get_data_model
from session_state_management import change_ss_for_project_change
import streamlit as st
import pandas as pd
import prompts
from streamlit_option_menu import option_menu
from streamlit_extras.add_vertical_space import add_vertical_space
import extra_streamlit_components as stx
import sqlite3
import json
import toml
import sys
from home_page import home_page, what_can_you_do
home_dir = os.path.expanduser("~")
# Append the dir_path to the sys path
dir_path = os.path.dirname(os.path.realpath(__file__))
sys.path.append(dir_path)
plugins_path = os.path.join(dir_path, 'plugins')
sys.path.append(plugins_path)
# NOT MIGRATED: NOT USED ANYWHERE
def get_project_database():
"""
This function establishes a connection to the project database. It will loop through the existing tables and create a dictionary of table_name and top two rows of the table.
Args:
- None
Returns:
- table_dict (dict): A dictionary of table_name and top two rows of the table in markdown format.
"""
# Get the list of tables in the database
con = sqlite3.connect(os.path.join(st.session_state.project_folder, 'data.db'))
st.session_state.con = con
tables = pd.read_sql_query("SELECT name FROM sqlite_master WHERE type='table'", con=st.session_state.con)
table_names = tables['name'].tolist()
st.warning(f'The following tables are available in the database:{table_names}')
# Create a dictionary of table_name and top two rows of the table
table_dict = {}
for table_name in table_names:
table_dict[table_name] = pd.read_sql_query(f"SELECT * FROM {table_name}", con).head(2).to_markdown()
return table_dict
# MIGRATED
def get_project_file_folder():
"""
Returns the path to the project folder for the current user.
Projects are folders within the user's folder.
Project file is the path to the main.py file.
Args:
- None
Returns:
- project_file (str): The path to the project file.
"""
user_folder = st.session_state.user_folder
# Get just the directory names, ignore the files
try:
project_names = [i for i in os.listdir(user_folder) if os.path.isdir(os.path.join(user_folder, i))]
except FileNotFoundError as e:
# Create the folder
os.makedirs(user_folder)
project_names = []
# Ignore pycache
project_names = [i for i in project_names if not 'pycache' in i]
# Drop project names that start with '.'
project_names = [i for i in project_names if not i.startswith('.')]
# Add new create project option
project_names.append('Create new project')
# Make the first project the default
default_index = 0
# If no project is selected, select the first project
if 'selected_project' not in st.session_state:
st.session_state.selected_project = project_names[default_index]
# Project names in menu have "~" in them.
if "~" in st.session_state.new_menu:
selected_project = st.session_state.new_menu.split("~")[1]
# If it is different from the current project, change the project
# and change session state
if selected_project != st.session_state.selected_project:
st.session_state.selected_project = selected_project
change_ss_for_project_change()
if selected_project != 'Create new project':
project_name = selected_project.replace('_', ' ').upper() + ' PROJECT'
st.header(project_name, divider='rainbow')
st.info(f"Please go to 'Functionalities' in the menu above and select what you want to do next. Your options are given below.")
what_can_you_do()
selected_project = st.session_state.selected_project
if selected_project == 'Create new project':
create_new_project()
st.stop()
project_folder = os.path.join(st.session_state.user_folder, selected_project)
# Save to session state
st.session_state.project_folder = project_folder
return None
# NOT MIGRATED: NOT USED ANYWHERE
def add_llm_data_model(data_model_pkl_file):
import pickle as pk
with open(data_model_pkl_file, 'rb') as f:
llm_model = pk.load(f)
llm_data = ""
for file_name in llm_model:
llm_data += f"File path: {file_name}\n"
system_instruction_path = llm_model[file_name]['system_instruction_path']
# Open the system instruction
with open(system_instruction_path, 'r') as f:
si = f.read()
# Add the system instruction to the llm_data for context
llm_data += f"This table was created using an llm who got this instruction: {si}\n"
# Add column info to the llm_data
col_info = llm_model[file_name]['col_info']
llm_data += f"Column info:\n{col_info}\n"
return llm_data
# NOT MIGRATED: NOT USED ANYWHERE
def show_project_settings():
show_settings = False
if st.session_state.new_menu == 'data':
show_settings = True
project_folder = st.session_state.project_folder
data_model_file = os.path.join(project_folder, 'data_model.parquet')
if not os.path.exists(data_model_file):
show_settings = True
return show_settings
# NOT MIGRATED: NEW MENU SYSTEM
def manage_project():
"""
Allows the user to manage key aspects of the selected project:
- Manage data
- Set / edit project description
"""
show_settings = show_project_settings()
if show_settings:
project_settings()
return None
# MIGRATED
def project_settings():
"""
This function allows the user to manage key aspects of the selected project:
- Manage data
- Set / edit project description
"""
# Get the project folder
project_folder = st.session_state.project_folder
# If the file called data_model.parquet is missing, toggle the manage project button
data_model_file = os.path.join(project_folder, 'data_model.parquet')
if os.path.exists(data_model_file):
# Add data description to session state
st.session_state.data_description = pd.read_parquet(data_model_file).to_markdown(index=False)
options = [
'Upload your data',
'Fetch data',
]
default_index = 0
selected_option = st.radio(
"Select an option",
options,
captions=["CSV, XLSX, TXT, VTT, etc.", "YouTube, Google Search"],
horizontal=True,
index=default_index
)
st.markdown("---")
if selected_option == 'Upload your data':
file_upload_and_save()
get_data_model()
st.stop()
if selected_option == 'Fetch data':
if st.checkbox("Get data from YouTube"):
from tools.yt_search import main as yt_search
yt_search()
st.warning("Uncheck get data from YouTube to go to project settings")
if st.checkbox("Get data from Google"):
from tools.google_search import main as google_search
google_search()
st.warning("Uncheck get data from Google to go to project settings")
st.stop()
if selected_option == 'Project description (optional)':
ideate_project()
st.stop()
return None
# MIGRATED
def ideate_project():
"""
This stores the user requirement for the given view,
based on the selected menu.
"""
file_path = os.path.join(st.session_state.project_folder, 'project_settings', 'project_description.txt')
key = 'Project Description'
widget_label = 'Project Description'
st.subheader('Ideate')
project_description = text_areas(file=file_path, key=key, widget_label=widget_label)
# Save to session state
st.session_state.project_description = project_description
ideation_chat()
return None
# MIGRATED
def ideation_chat():
"""
A chat on the project description.
That could be exported to the project description file.
"""
# If there is no project description chat in the session state, create one
if 'ideation_chat' not in st.session_state:
st.session_state.ideation_chat = []
chat_container = st.container()
prompt = st.chat_input("Enter your message", key='project_description_chat_input')
if prompt:
# Create the messages from the prompts file
prompts.blueprint_prompt_structure(prompt=prompt)
with st.spinner('Generating response...'):
res = get_llm_output(
st.session_state.ideation_chat,
model='gpt-3.5-turbo-16k'
)
# Add the response to the chat
st.session_state.ideation_chat.append({'role': 'assistant', 'content': res})
# Display the user and assistant messages
with chat_container:
for msg in st.session_state.ideation_chat:
if msg['role'] in ['user', 'assistant']:
with st.chat_message(msg['role']):
st.markdown(msg['content'])
return None
# MIGRATED
def set_user_requirements():
"""
This stores the user requirement for the given view,
based on the selected menu.
"""
file_path = os.path.join(st.session_state.project_folder, 'project_settings', 'user_requirements.txt')
key = 'User Requirements'
widget_label = 'User Requirements'
st.subheader('User requirements')
user_requirements = text_areas(file=file_path, key=key, widget_label=widget_label)
# Save to session state
st.session_state.user_requirements = user_requirements
user_requirements_chat()
st.stop()
return None
# MIGRATED
def user_requirements_chat():
"""
A chat on the user requirements.
That could be exported to the user requirements file.
"""
# If there is no user requirements chat in the session state, create one
if 'user_requirements_chat' not in st.session_state:
st.session_state.user_requirements_chat = []
chat_container = st.container()
prompt = st.chat_input("Enter your message", key='user_requirements_chat_input')
if prompt:
# Create the messages from the prompts file
prompts.blueprint_prompt_structure(prompt=prompt)
with st.spinner('Generating response...'):
res = get_llm_output(st.session_state.user_requirements_chat, model='gpt-3.5-turbo-16k')
# Add the response to the chat
st.session_state.user_requirements_chat.append({'role': 'assistant', 'content': res})
# Display the user and assistant messages
with chat_container:
for msg in st.session_state.user_requirements_chat:
if msg['role'] in ['user', 'assistant']:
with st.chat_message(msg['role']):
st.markdown(msg['content'])
return None
# MIGRATED
def create_new_project():
"""
Creates a new project folder, main.py file, and __init__.py file.
TODO: Need to call this somewhere.
"""
# Get the project name
project_name = st.text_input("Enter the project name")
if project_name == '':
st.warning('Enter a project name')
st.stop()
# Lower case and replace spaces with underscores
project_name = project_name.lower().replace(' ', '_')
# Check if the project name already exists
# token_name = st.session_state.token
# user_folder = os.path.join('users', token_name)
# Create the path to the .typebuild directory
user_folder = st.session_state.user_folder
# Check if the directory exists
if not os.path.exists(user_folder):
# Create the directory if it doesn't exist
os.makedirs(user_folder)
# Project folder is project name inside the user folder
project_folder = os.path.join(user_folder, project_name)
if os.path.exists(project_folder):
st.write('Project already exists, please rename')
st.stop()
st.session_state.project_folder = project_folder
# Create the project folder
if not os.path.exists(project_folder):
os.makedirs(project_folder)
data_folder = os.path.join(project_folder, 'data')
views_folder = os.path.join(project_folder, 'views')
project_settings_folder = os.path.join(project_folder, 'project_settings')
# Create these folders if they do not exist
if not os.path.exists(data_folder):
os.makedirs(data_folder)
if not os.path.exists(views_folder):
os.makedirs(views_folder)
if not os.path.exists(project_settings_folder):
os.makedirs(project_settings_folder)
# Create the __init__.py file
init_file = os.path.join(project_folder, '__init__.py')
if not os.path.exists(init_file):
with open(init_file, 'w') as f:
f.write('')
st.success("Created the project. Taking you to it now...")
# Save to session state
st.session_state.project_folder = project_folder
# Increment session number
st.session_state.ss_num += 1
st.session_state[f'selected_project_{st.session_state.ss_num}'] = project_name
st.session_state.selected_project = project_name
time.sleep(2)
# Take user to project settings
reset_menu(new_menu='project_settings')
return None
# NOT MIGRATED: NOT USED ANYWHERE
def get_project_df():
"""
This function gets the project dataframe from the project folder.
"""
files = glob(os.path.join(st.session_state.project_folder, 'data', '*.parquet'))
if len(files) > 0:
# Get the list of files in the project folder
files = glob(os.path.join(st.session_state.project_folder, 'data', '*.parquet'))
# Ask the user to select a file to append data to
selected_file = st.selectbox("Select a file to append data to", files)
# Load the file as a dataframe
df = pd.read_parquet(selected_file)
# Show the dataframe
st.dataframe(df)
return df
return None
# MIGRATED
def export_sqlite_to_parquet(uploaded_file, output_dir):
tmp_folder = os.path.join(st.session_state.project_folder, 'documents')
# Create the tmp folder if it does not exist
if not os.path.exists(tmp_folder):
os.makedirs(tmp_folder)
with open(os.path.join(tmp_folder, 'tmp.sqlite'), 'wb') as f:
f.write(uploaded_file.read())
# Connect to the SQLite database
conn = sqlite3.connect(os.path.join(tmp_folder, 'tmp.sqlite'))
# Get the list of all tables in the database
query = "SELECT name FROM sqlite_master WHERE type='table';"
tables = conn.execute(query).fetchall()
tables = [table[0] for table in tables]
tables = st.multiselect("Select tables to import", tables)
if st.button("Import these tables"):
# Ensure output directory exists
if not os.path.exists(output_dir):
os.makedirs(output_dir)
# For each table, read it into a pandas DataFrame and then write it to a Parquet file
for table in tables:
df = pd.read_sql(f"SELECT * FROM {table}", conn)
df.to_parquet(os.path.join(output_dir, f"{table}.parquet"), index=False)
# Close the SQLite connection
conn.close()
return None
# MIGRATED
def upload_data_file(uploaded_file, file_extension):
"""
Upload data files to import them into the project.
"""
data_folder = os.path.join(st.session_state.project_folder, 'data')
# Load the file as a dataframe
if file_extension == 'csv':
df = pd.read_csv(uploaded_file)
elif file_extension == 'parquet':
df = pd.read_parquet(uploaded_file)
elif file_extension == 'tsv':
df = pd.read_csv(uploaded_file, sep='\t')
elif file_extension in ['xlsx']:
df = pd.read_excel(uploaded_file)
else:
st.error(f'File type {file_extension} not supported')
st.stop()
# Clean column names. Strip, lower case, replace spaces with underscores
df.columns = [i.strip().lower().replace(' ', '_') for i in df.columns]
# Create a streamlit form, with all columns and data types and allow the user to edit the data types
# Get the list of column names
col_names = df.columns.tolist()
# If there are duplicate column names, add _1, _2, etc. to the end of the column name
for i, col_name in enumerate(col_names):
if col_names.count(col_name) > 1:
col_names[i] = col_name + '_' + str(col_names[:i].count(col_name) + 1)
# Rename the columns with the updated column names
df.columns = col_names
# Get the column names and data types
all_col_infos = []
for column in df.columns:
column_info = {}
column_info['column_name'] = column
column_info['column_type'] = str(df[column].dtype)
column_info['column_info'] = ''
all_col_infos.append(column_info)
# Update the data types of the dataframe
for col_info in all_col_infos:
col_name = col_info['column_name']
col_type = col_info['column_type']
if col_type != 'object':
df[col_name] = df[col_name].astype(col_type)
# Show the dataframe
st.dataframe(df)
# Get the name of the uploaded file
file_name = uploaded_file.name
# Remove the file extension
file_name = file_name.replace(f'.{file_extension}', '')
st.info("Once you save the data, we will explore a few lines of data to a language model to understand the data. This will help us later to generate code for the data.")
# Create a button to save the file as a parquet file with the same name
if st.button('Save Data'):
# Save the file to the data folder
file_path = os.path.join(data_folder, file_name + '.parquet')
# Create folder if it does not exist
folder_name = os.path.dirname(file_path)
if not os.path.exists(folder_name):
os.makedirs(folder_name)
df = clean_col_formats(df)
df.to_parquet(file_path, index=False)
st.success(f'File saved successfully')
st.session_state.files_uploaded = False
return None
# MIGRATED
def clean_col_formats(df):
"""
Apache Arrow requires clean formatting. Look at the
column names and data types and clean them up.
Try saving as column type and see if it works. If not, save as string.
Args:
- df (dataframe): The dataframe to clean
Returns:
- df (dataframe): The cleaned dataframe
"""
# Get the list of column names
col_names = df.columns.tolist()
# If there are duplicate column names, add _1, _2, etc. to the end of the column name
for i, col_name in enumerate(col_names):
if col_names.count(col_name) > 1:
col_names[i] = col_name + '_' + str(col_names[:i].count(col_name) + 1)
# Rename the columns with the updated column names
df.columns = col_names
# st.dataframe(df)
# Get the column names and data types
all_col_infos = []
for column in df.columns:
column_info = {}
column_info['column_name'] = column
column_info['column_type'] = str(df[column].dtype)
column_info['column_info'] = ''
all_col_infos.append(column_info)
# Update the data types of the dataframe
for col_info in all_col_infos:
col_name = col_info['column_name']
col_type = col_info['column_type']
if col_type != 'object':
try:
df[col_name] = df[col_name].astype(col_type)
except:
df[col_name] = df[col_name].astype(str)
if col_type == 'object':
df[col_name] = df[col_name].astype(str)
return df
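# Illustrative example with made-up data: a mixed-type object column is coerced
# to strings so pyarrow can serialise the frame.
#   demo = pd.DataFrame({"id": [1, 2], "note": ["ok", 3.5]})
#   clean_col_formats(demo)
#   # -> "id" stays int64, "note" becomes an object column of strings ("ok", "3.5")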
# MIGRATED
def upload_document_file(uploaded_file, file_extension):
"""
This function allows the user to upload a document file and save it to the project folder.
Args:
- uploaded_file (file): The file uploaded by the user.
- file_extension (str): The file extension of the uploaded file.
Returns:
- None
"""
tmp_folder = os.path.join(st.session_state.project_folder, 'documents')
# Create the tmp folder if it does not exist
if not os.path.exists(tmp_folder):
os.makedirs(tmp_folder)
# Get the name of the uploaded file
file_name = uploaded_file.name
# Get the file extension
file_extension = file_name.split('.')[-1]
# Remove the file extension
file_name = file_name.replace(f'.{file_extension}', '')
# Save the file to the tmp folder
tmp_file_path = os.path.join(tmp_folder, f"{file_name}.{file_extension}")
with open(tmp_file_path, 'wb') as f:
f.write(uploaded_file.getbuffer())
uploaded_file = None
return None
# MIGRATED
def file_upload_and_save():
"""
This function allows the user to upload a CSV or a parquet file, load it as a dataframe,
and provides a button to save the file as a parquet file with the same name.
"""
data_folder = os.path.join(st.session_state.project_folder, 'data')
# Define the allowed file types
allowed_data_file_types = ['csv', 'parquet', 'xlsx' , 'tsv', 'sqlite', 'db', 'sqlite3']
allowed_document_file_types = ['pdf', 'txt', 'vtt']
# Ask the user to upload a file
uploaded_files = st.file_uploader(
"Upload a file",
type=allowed_data_file_types + allowed_document_file_types,
accept_multiple_files=True)
file_extension = None
if len(uploaded_files) ==1:
st.session_state.files_uploaded = True
st.warning(f'Adding your new document(s) to the existing documents database')
uploaded_file = uploaded_files[0]
# Get the file extension
file_extension = uploaded_file.name.split('.')[-1]
# If the file is a data file, upload it as a data file
if file_extension in ['sqlite', 'db', 'sqlite3']:
export_sqlite_to_parquet(uploaded_file, data_folder)
st.success(f'Files saved successfully')
elif file_extension in allowed_data_file_types:
upload_data_file(uploaded_file, file_extension)
# If the file is a document file, upload it as a document file
elif file_extension in allowed_document_file_types:
upload_document_file(uploaded_file, file_extension)
elif len(uploaded_files) > 1:
st.warning(f'Adding your new document(s) to the existing documents database')
# Get the file extension
file_extension = uploaded_files[0].name.split('.')[-1]
# If the file is a document file, upload it as a document file
if file_extension in allowed_document_file_types:
for uploaded_file in uploaded_files:
upload_document_file(uploaded_file, file_extension)
if file_extension:
if file_extension in allowed_document_file_types:
tmp_folder = os.path.join(st.session_state.project_folder, 'documents')
# Create chunks of the document and save it to the data folder
df_chunks = create_document_chunk_df(tmp_folder)
# Add documents_tbid to the dataframe
df_chunks['documents_tbid'] = df_chunks.index+1
# Move the id column to the front
cols = df_chunks.columns.tolist()
cols = cols[-1:] + cols[:-1]
df_chunks = df_chunks[cols]
# Show the dataframe
st.dataframe(df_chunks)
uploaded_file = None
# Create a button to save the file as a parquet file in the data folder with the same name
# If the parquet file already exists, append the data to the existing file
if st.button('Save Document'):
# Save the file to the data folder
file_path = os.path.join(data_folder, 'documents.parquet')
# If the file already exists, append the data to the existing file
if os.path.exists(file_path):
# Load the existing file as a dataframe
df = pd.read_parquet(file_path)
# Append the data
df = pd.concat([df, df_chunks])
df = df.drop_duplicates(keep='first')
# Save the file to the data folder
df.to_parquet(file_path, index=False)
st.success(f'Data added successfully')
else:
# Save the file to the data folder
df_chunks = df_chunks.drop_duplicates(keep='first')
df_chunks.to_parquet(file_path, index=False)
st.success(f'Data saved successfully')
return None
# MIGRATED
def append_data_to_exisiting_file():
"""
This function allows the user to append data to an existing file.
It also allows the user to process the data and save it to a new file.
You can upload a CSV, JSON, PARQUET, EXCEL, or PICKLE file.
Once the file is uploaded, it is added to an existing parquet file.
"""
file_path = os.path.join(st.session_state.project_folder, 'data')
# Get the list of files in the project folder
files = glob(os.path.join(file_path, '*.parquet'))
# Ask the user to select a file to append data to
selected_file = st.selectbox("Select a file to append data to", files)
df1 = pd.read_parquet(selected_file)
# Upload a new file
uploaded_file = st.file_uploader("Upload a file", type=['csv', 'parquet'])
# If a file was uploaded, create a df2 dataframe
if uploaded_file is not None:
# Get the file extension
file_extension = uploaded_file.name.split('.')[-1]
# Load the file as a dataframe
if file_extension == 'csv':
df2 = pd.read_csv(uploaded_file)
elif file_extension == 'parquet':
df2 = pd.read_parquet(uploaded_file)
# Show the dataframe
st.dataframe(df2)
# If the columns are different, show the missing columns
df1_cols = set(df1.columns.tolist())
df2_cols = set(df2.columns.tolist())
if df1_cols != df2_cols:
missing_cols = df1_cols.difference(df2_cols)
st.warning(f'The following columns are missing in the uploaded file: {missing_cols}')
else:
st.info("The columns in the uploaded file match the columns in the existing file")
# Create a button to append the data to the existing file
if st.button('Append data'):
# Append the data
df = pd.concat([df1, df2])
# Save the file to the data folder
df.to_parquet(selected_file, index=False)
st.success(f'Data appended successfully')
uploaded_file = None
return None
| [
"user_requirements_chat_input",
"Enter your message",
"project_description_chat_input"
] |
2024-01-10 | Leezekun/zeno-build | zeno_build~models~global_models.py | """A module for global variables regarding models."""
from __future__ import annotations
import cohere
cohere_client: cohere.Client | None = None
| [] |
2024-01-10 | ryang420/Freyr-KG-LLM | api~src~domains~graph_domain.py | from langchain.graphs.graph_document import (
Node as BaseNode,
Relationship as BaseRelationship,
GraphDocument,
)
from langchain.schema import Document
from typing import List, Dict, Any, Optional
from langchain.pydantic_v1 import Field, BaseModel
class Property(BaseModel):
"""A single property consisting of key and value"""
key: str = Field(..., description="key")
value: str = Field(..., description="value")
class Node(BaseNode):
properties: Optional[List[Property]] = Field(
None, description="List of node properties")
class Relationship(BaseRelationship):
properties: Optional[List[Property]] = Field(
None, description="List of relationship properties"
)
class KnowledgeGraph(BaseModel):
"""Generate a knowledge graph with entities and relationships."""
nodes: List[Node] = Field(
[], description="List of nodes in the knowledge graph")
rels: List[Relationship] = Field(
[], description="List of relationships in the knowledge graph"
)
class GraphData(BaseModel):
"""Generate a knowledge graph with entities and relationships."""
nodes: List[Node] = Field(
[], description="List of nodes in the knowledge graph")
links: List[Relationship] = Field(
[], description="List of relationships in the knowledge graph"
)
def format_property_key(s: str) -> str:
words = s.split()
if not words:
return s
first_word = words[0].lower()
capitalized_words = [word.capitalize() for word in words[1:]]
return "".join([first_word] + capitalized_words)
def props_to_dict(props) -> dict:
"""Convert properties to a dictionary."""
properties = {}
if not props:
return properties
for p in props:
properties[format_property_key(p.key)] = p.value
return properties
def map_to_base_node(node: Node) -> BaseNode:
"""Map the KnowledgeGraph Node to the base Node."""
properties = props_to_dict(node.properties) if node.properties else {}
# Add name property for better Cypher statement generation
properties["name"] = node.id.title()
return BaseNode(
id=node.id.title(), type=node.type.capitalize(), properties=properties
)
def map_to_base_relationship(rel: Relationship) -> BaseRelationship:
"""Map the KnowledgeGraph Relationship to the base Relationship."""
source = map_to_base_node(rel.source)
target = map_to_base_node(rel.target)
properties = props_to_dict(rel.properties) if rel.properties else {}
return BaseRelationship(
source=source, target=target, type=rel.type, properties=properties
)
| [] |
2024-01-10 | ryang420/Freyr-KG-LLM | api~src~components~graph_data_update.py | from langchain.prompts import ChatPromptTemplate
from langchain.chat_models import ChatOpenAI
from langchain.chains.openai_functions import (
create_structured_output_chain,
)
from langchain_core.output_parsers import StrOutputParser
def graph_data_augmentation(llm: ChatOpenAI,
graph_data: str,
user_input: str):
"""Augment graph data with user input."""
prompt = ChatPromptTemplate.from_messages([
(
"system",
"""
Your task is to make changes on the graph data by user's input and return a json string.
The graph data is a json format string. It contains two keys: nodes and links.
The user's input is to add, update or delete the nodes and links.
If the deleted node has any relationships or links on it, delete them as well.
Return me the json format string of the updated graph data only. No additional information included!
"""
),
("human", "Use the given context to update {graph_data} by {user_input}")
])
output_parser = StrOutputParser()
chain = prompt | llm | output_parser
return chain.invoke({"graph_data": graph_data, "user_input": user_input})
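# Minimal usage sketch; the graph string and user input below are made-up
# examples, and the model settings are just one possible choice:
#   llm = ChatOpenAI(temperature=0)
#   updated = graph_data_augmentation(
#       llm,
#       graph_data='{"nodes": [{"id": "Alice"}], "links": []}',
#       user_input="add a node Bob linked to Alice",
#   )
#   # `updated` is the raw string returned by the chain; parse it with json.loads
#   # before relying on its structure.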
| [
"\n Your task is to make changes on the graph data by user's input and return a json string.\n The graph data is a json format string. It contains two keys: nodes and links. \n The user's input is to add, update or delete the nodes and links. \n If the deleted node has any relationships or links on it, delete them as well.\n Return me the json format string of the updated graph data only. No additional information included!\n\n ",
"Use the given context to update {graph_data} by {user_input}",
"[('system', \"\\n Your task is to make changes on the graph data by user's input and return a json string.\\n The graph data is a json format string. It contains two keys: nodes and links. \\n The user's input is to add, update or delete the nodes and links. \\n If the deleted node has any relationships or links on it, delete them as well.\\n Return me the json format string of the updated graph data only. No additional information included!\\n\\n \"), ('human', 'Use the given context to update {graph_data} by {user_input}')]",
"human"
] |
2024-01-10 | ryang420/Freyr-KG-LLM | api~src~components~graph_extraction.py | from langchain.chains.openai_functions import (
create_structured_output_chain,
)
from langchain.chat_models import ChatOpenAI
from langchain.graphs.graph_document import GraphDocument
from langchain.prompts import ChatPromptTemplate
from typing import List, Dict, Any, Optional
from domains.graph_domain import KnowledgeGraph, map_to_base_relationship, map_to_base_node
from langchain.schema import Document
def extract_graph(
llm: ChatOpenAI,
document: Document,
nodes: Optional[List[str]] = None,
rels: Optional[List[str]] = None) -> GraphDocument:
# Extract graph data using OpenAI functions
extract_chain = get_extraction_chain(llm, nodes, rels)
data = extract_chain.run(document.page_content)
# set page_content to empty string to avoid sending it to the client
document.page_content = ""
# Construct a graph document
graph_document = GraphDocument(
nodes=[map_to_base_node(node) for node in data.nodes],
relationships=[map_to_base_relationship(rel) for rel in data.rels],
source=document
)
return graph_document
def get_extraction_chain(
llm: ChatOpenAI,
allowed_nodes: Optional[List[str]] = None,
allowed_rels: Optional[List[str]] = None
):
prompt = ChatPromptTemplate.from_messages(
[(
"system",
f"""# Knowledge Graph Instructions for GPT-4
## 1. Overview
You are a top-tier algorithm designed for extracting information in structured formats to build a knowledge graph.
- **Nodes** represent entities and concepts. They're akin to Wikipedia nodes.
- The aim is to achieve simplicity and clarity in the knowledge graph, making it accessible for a vast audience.
## 2. Labeling Nodes
- **Consistency**: Ensure you use basic or elementary types for node labels.
- For example, when you identify an entity representing a person, always label it as **"person"**. Avoid using more specific terms like "mathematician" or "scientist".
- **Node IDs**: Never utilize integers as node IDs. Node IDs should be names or human-readable identifiers found in the text.
{'- **Allowed Node Labels:**' + ", ".join(allowed_nodes) if allowed_nodes else ""}
{'- **Allowed Relationship Types**:' + ", ".join(allowed_rels) if allowed_rels else ""}
## 3. Handling Numerical Data and Dates
- Numerical data, like age or other related information, should be incorporated as attributes or properties of the respective nodes.
- **No Separate Nodes for Dates/Numbers**: Do not create separate nodes for dates or numerical values. Always attach them as attributes or properties of nodes.
- **Property Format**: Properties must be in a key-value format.
- **Quotation Marks**: Never use escaped single or double quotes within property values.
- **Naming Convention**: Use camelCase for property keys, e.g., `birthDate`. Use UPPER_CASE for relationship type, e.g., `FOUNDED_BY`.
## 4. Co-reference Resolution
- **Maintain Entity Consistency**: When extracting entities, it's vital to ensure consistency.
If an entity, such as "John Doe", is mentioned multiple times in the text but is referred to by different names or pronouns (e.g., "Joe", "he"),
always use the most complete identifier for that entity throughout the knowledge graph. In this example, use "John Doe" as the entity ID.
Remember, the knowledge graph should be coherent and easily understandable, so maintaining consistency in entity references is crucial.
## 5. Strict Compliance
Adhere to the rules strictly. Non-compliance will result in termination.
"""),
("human", "Use the given format to extract information from the following input: {input}"),
("human", "Tip: Make sure to answer in the correct format"),
])
return create_structured_output_chain(KnowledgeGraph, llm, prompt, verbose=False)
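# Minimal sketch of how these helpers fit together (the document text, labels and
# model settings are illustrative only):
#   llm = ChatOpenAI(temperature=0)
#   doc = Document(page_content="Marie Curie won the Nobel Prize in 1903.")
#   graph_doc = extract_graph(llm, doc, nodes=["Person", "Award"], rels=["WON"])
#   # graph_doc.nodes / graph_doc.relationships hold the mapped base objects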
| [
"- **Allowed Node Labels:**",
"human",
"- **Allowed Relationship Types**:",
"Use the given format to extract information from the following input: {input}",
"Tip: Make sure to answer in the correct format",
", "
] |
2024-01-10 | hlterdo/main | codelabs~py~gmail~chaatgpt.py | import logging
from gmail_utils import fetch_emails
from inde_email import IndeEmail
import os
import json
from openai_utils import get_openai_embedding
from openai_utils import embedding_similarity
from openai_utils import call_chatgpt_per_email
from openai_utils import CHATGPT_DOESNT_KNOW
from openai_utils import call_chatgpt_for_multiple_answers
from log_level import LOG_LEVEL
# Number of latest emails to fetch from gmail account.
_NUM_EMAILS_TO_READ = 25
# Number of emails to try to answer users questions. Only this many top (based on their similarity)
_NUM_TOP_EMAILS_TO_CHECK_FOR_ANSWER = 15
# Filter that's applied (positives will be kept) when fetching emails.
_EMAIL_FILTER = "from:[email protected]"
# Where the tokens are kept. See the README
_ACCESS_TOKENS_FILE = "tokens.json"
# Where a copy of each email that's found is kept.
_EMAIL_DUMPS_FOLDER = "emails/"
# Set up logging. Modify log_level.py to change the logging level.
logging.basicConfig()
LOG = logging.getLogger("chaatgpt")
LOG.setLevel(LOG_LEVEL)
# *** MAIN STARTS *** #
print(
f"Trying to fetch {_NUM_EMAILS_TO_READ} emails using the filter \"{_EMAIL_FILTER}\"")
emails = fetch_emails(_ACCESS_TOKENS_FILE, _NUM_EMAILS_TO_READ, _EMAIL_FILTER)
emails_dict = {email.get_email_id(): email for email in emails}
assert len(emails_dict) == len(emails), f"There cannot be duplicate email ids."
print(f"\n\nWill use {len(emails)} emails as the knowledge base.")
LOG.info(f"Writing emails to the folder {_EMAIL_DUMPS_FOLDER}")
for email in emails:
os.makedirs(os.path.dirname(_EMAIL_DUMPS_FOLDER), exist_ok=True)
path = os.path.join(_EMAIL_DUMPS_FOLDER, email.get_subject(
) + "-" + email.get_email_id() + ".json")
with open(path, 'w') as f:
json.dump(email.to_dict(), f, indent='\n')
LOG.info(f"Done writing emails to the folder {_EMAIL_DUMPS_FOLDER}")
LOG.info(f"Calculating embeddings for each email")
for email in emails:
embedding = get_openai_embedding(email.get_clean_body())
email.set_embedding(embedding)
LOG.info(f"Done calculating embeddings for each email")
# Continues get questions from the user until they type quit
while True:
question = input("\n\nEnter a question (type quit to quit): ")
if question == "quit":
break
query_embedding = get_openai_embedding(question)
# email_id -> similarity to the query.
query_email_similarities = {}
for email in emails:
email_id = email.get_email_id()
email_embedding = email.get_embedding()
similarity = embedding_similarity(query_embedding, email_embedding)
query_email_similarities[email_id] = similarity
# sort each email based on their similarity to the query.
sorted_query_email_similarities = dict(
sorted(query_email_similarities.items(), key=lambda x: x[1], reverse=True))
print(
"\n\n\n=== RANKED ORDER OF EMAILS TO THE QUESTION BASED ON THEIR SIMILARITY ===\n\n")
for email_id, similarity in sorted_query_email_similarities.items():
subject = emails_dict[email_id].get_subject()
print(f"{subject}: {similarity}")
print(
f"\n\n==== Checking top {_NUM_TOP_EMAILS_TO_CHECK_FOR_ANSWER} top emails to check whether it can answer your question ====\n\n")
num_emails_checked = 0
answering_emails = []
answers = []
for email_id, similarity in sorted_query_email_similarities.items():
email = emails_dict[email_id]
subject = email.get_subject()
date = email.get_date()
clean_body = email.get_clean_body()
chatgpt_answer = call_chatgpt_per_email(clean_body, question)
print(
f"Answer from email \"{subject}\" sent on {date}:\n\n{chatgpt_answer}:\n\n\n")
num_emails_checked += 1
if (num_emails_checked >= _NUM_TOP_EMAILS_TO_CHECK_FOR_ANSWER):
break
if chatgpt_answer != CHATGPT_DOESNT_KNOW:
answering_emails.append(email)
answers.append(chatgpt_answer)
print(
f"\n\n\nFound {len(answering_emails)} emails that contains an answer to the question.")
for i in range(len(answering_emails)):
print(f"{i + 1}. {answering_emails[i].get_subject()}")
print("\n\n\n=== ***** FINAL ANSWER ***** ===\n\n")
if len(answering_emails) == 0:
print(f"We couldn't answer this question using the emails listed above\n\n")
continue
final_answer = call_chatgpt_for_multiple_answers(
answering_emails, answers, question)
print(final_answer)
| [] |
2024-01-10 | tszumowski/vocaltales_storyteller_chatbot | storyteller.py | """
Storyteller: A simple audio storytelling app using OpenAI API.
Example Usage:
python storyteller.py --address=127.0.0.1 --port=7860
"""
import argparse
import base64
import config
import gradio as gr
import io
import json
import openai
import os
import requests
import subprocess
from config import SpeechMethod
from google.cloud import texttospeech
from typing import Dict, List, Tuple
# Set OpenAI API Key
openai.api_key = os.environ.get("OPENAI_API_KEY")
if openai.api_key is None:
raise ValueError("OpenAI API Key not set as environnment variable OPENAI_API_KEY")
# Get eleven.io
elevenio_api_key = None
if config.SPEECH_METHOD == SpeechMethod.ELEVENIO:
elevenio_api_key = os.environ.get("ELEVENIO_API_KEY")
if elevenio_api_key is None:
raise ValueError(
"Eleven.io API Key not set as environnment variable ELEVENIO_API_KEY"
)
# Initial message
messages = [
{
"role": "system",
"content": config.INITIAL_PROMPT,
}
]
"""
Main functions
"""
def transcribe_audio(audio_file: str) -> str:
"""
Transcribe audio file using OpenAI API.
Args:
audio: stringified path to audio file. WAV file type.
Returns:
str: Transcription of audio file
"""
# gradio sends in a .wav file type, but it may not be named that. Rename with
# .wav extension because Whisper model only accepts certain file extensions.
if not audio_file.endswith(".wav"):
os.rename(audio_file, audio_file + ".wav")
audio_file = audio_file + ".wav"
# Open audio file and transcribe
with open(audio_file, "rb") as f:
transcript = openai.Audio.transcribe("whisper-1", f)
text_transcription = transcript["text"]
return text_transcription
def chat_complete(
text_input: str, messages: List[Dict[str, str]]
) -> Tuple[str, List[Dict[str, str]]]:
"""
Chat complete using OpenAI API. This is what generates stories.
Args:
text_input: Text to use as prompt for story generation
messages: List of previous messages
Returns:
str: Generated story
messages: Updated list of messages
"""
# Init with prompt on first call
if not messages:
messages = [
{
"role": "system",
"content": config.INITIAL_PROMPT,
}
]
# Append to messages for chat completion
messages.append({"role": "user", "content": text_input})
# Fetch response from OpenAI
print("Messages sent to call: ", messages)
response = openai.ChatCompletion.create(model="gpt-3.5-turbo", messages=messages)
# Extract and store message
system_message = dict(response["choices"][0]["message"])
messages.append(system_message)
# Return message to display
display_message = system_message["content"]
if config.SPEECH_METHOD == SpeechMethod.MAC:
# call subprocess in background
subprocess.Popen(["say", system_message["content"]])
return display_message, messages
def generate_image(text_input: str) -> str:
"""
Generate an image using DALL-E via OpenAI API.
Args:
text_input: Text to use as prompt for image generation
Returns:
str: Path to generated image
"""
prompt = text_input[: config.PROMPT_MAX_LEN]
response = openai.Image.create(prompt=prompt, n=1, size=config.RESOLUTION)
image_url = response["data"][0]["url"]
img_data = requests.get(image_url).content
with open(config.IMAGE_PATH, "wb") as handler:
handler.write(img_data)
return config.IMAGE_PATH
def audio_file_to_html(audio_file: str) -> str:
"""
Convert audio file to HTML audio player.
Args:
audio_file: Path to audio file
Returns:
audio_player: HTML audio player that auto-plays
"""
# Read in audio file to audio_bytes
audio_bytes = io.BytesIO()
with open(audio_file, "rb") as f:
audio_bytes.write(f.read())
# Generate audio player HTML object for autoplay
audio_bytes.seek(0)
audio = base64.b64encode(audio_bytes.read()).decode("utf-8")
audio_player = (
f'<audio src="data:audio/mpeg;base64,{audio}" controls autoplay></audio>'
)
return audio_player
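# For illustration (hypothetical file path): the returned markup embeds the audio
# as a base64 data URI so the browser can autoplay it without a separate file:
#   player = audio_file_to_html("story.mp3")
#   # -> '<audio src="data:audio/mpeg;base64,...." controls autoplay></audio>'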
def text_to_speech_gcp(input_text: str, tts_voice_label: str) -> str:
"""
Use GCP Text-to-Speech API to convert text to a WAV file.
Args:
input_text: Text to convert to speech
tts_voice_label: Label of voice to use, from keys of TTS_VOICE_OPTIONS in config
Returns
str: Path to output audio file
"""
print(f"Convert text to speech: {input_text}")
# set up the client object
client = texttospeech.TextToSpeechClient()
# set up the synthesis input object
synthesis_input = texttospeech.SynthesisInput(text=input_text)
# derive language code and ID
tts_voice_id = config.TTS_VOICE_OPTIONS[tts_voice_label]
tts_language_code = "-".join(tts_voice_id.split("-")[0:2])
# set up the voice parameters
voice = texttospeech.VoiceSelectionParams(
language_code=tts_language_code, name=tts_voice_id
)
# set up the audio parameters
audio_config = texttospeech.AudioConfig(
audio_encoding=texttospeech.AudioEncoding.LINEAR16,
sample_rate_hertz=16000,
)
# generate the request
response = client.synthesize_speech(
input=synthesis_input, voice=voice, audio_config=audio_config
)
# save the response audio as an MP3 file
with open(config.GENERATED_SPEECH_PATH, "wb") as out:
out.write(response.audio_content)
# Generate audio player HTML object for autoplay
audio_player = audio_file_to_html(config.GENERATED_SPEECH_PATH)
return audio_player
def text_to_speech_elevenio(
input_text: str,
tts_voice_id: str,
stability: float = 0.65,
similarity_boost: float = 0.85,
) -> str:
"""
Use Eleven.io Text-to-Speech API to convert text to a WAV file.
Args:
input_text: Text to convert to speech
tts_voice_label: Label of voice to use, from keys of ELEVENIO_VOICE_ID in config
similarity_boost: Similarity boost for voice
stability: Stability for voice
Returns
str: Path to output audio file
"""
print(f"Convert text to speech: {input_text}")
tts_voice_id = config.ELEVENIO_VOICE_ID # Use pre-assigned from config
url = f"{config.ELEVENIO_TTS_BASE_URL}/{tts_voice_id}"
payload = json.dumps(
{
"text": input_text,
"voice_settings": {
"stability": stability,
"similarity_boost": similarity_boost,
},
}
)
headers = {
"xi-api-key": elevenio_api_key,
"Content-Type": "application/json",
"Accept": "audio/mpeg",
}
response = requests.request("POST", url, headers=headers, data=payload)
# save the response audio as an MP3 file
with open(config.GENERATED_SPEECH_PATH, "wb") as out:
out.write(response.content)
# Generate audio player HTML object for autoplay
audio_player = audio_file_to_html(config.GENERATED_SPEECH_PATH)
# return response.audio_content
return audio_player
"""
Gradio UI Definition
"""
with gr.Blocks(analytics_enabled=False, title="VocalTales: Audio Storyteller") as ui:
# Session state box containing all user/system messages, hidden
messages = gr.State(list())
# Initialize TTS
tts_fn = None
if config.SPEECH_METHOD == SpeechMethod.GCP:
tts_fn = text_to_speech_gcp
elif config.SPEECH_METHOD == SpeechMethod.ELEVENIO:
tts_fn = text_to_speech_elevenio
# Set up layout and link actions together
with gr.Row():
with gr.Column(scale=1):
with gr.Accordion("Click for Instructions & Configuration:", open=False):
# Voice Selection Dropdown
voice_labels = [k for k in config.TTS_VOICE_OPTIONS.keys()]
voice_selection = gr.Dropdown(
choices=voice_labels,
value=config.TTS_VOICE_DEFAULT,
label="Voice Selection",
)
# Instructions
gr.Markdown(config.INSTRUCTIONS_TEXT)
# Audio Input Box
audio_input = gr.Audio(
source="microphone", type="filepath", label="User Audio Input"
)
# User Input Box
transcribed_input = gr.Textbox(label="Transcription")
# Story Output Box
story_msg = gr.Textbox(label="Story")
if tts_fn:
# Connect story output to audio output after calling TTS on it
html = gr.HTML()
story_msg.change(tts_fn, [story_msg, voice_selection], html)
with gr.Column(scale=1):
# Story Generated Image
gen_image = gr.Image(label="Story Image", shape=(None, 5))
# Connect audio input to user input
audio_input.change(transcribe_audio, audio_input, transcribed_input)
# Connect user input to story output
transcribed_input.change(
chat_complete, [transcribed_input, messages], [story_msg, messages]
)
# Connect story output to image generation
story_msg.change(generate_image, story_msg, gen_image)
if __name__ == "__main__":
# Add a address string argument that defaults to 127.0.0.1
parser = argparse.ArgumentParser()
parser.add_argument(
"--address",
type=str,
default="127.0.0.1",
help="""
Address to run the server on. 127.0.0.1 for local. 0.0.0.0 for
remote or docker
""",
)
# add a port with None default
parser.add_argument(
"--port",
type=int,
default=None,
help="Port to run the server on",
)
parser.add_argument(
"--username",
type=str,
default=None,
help="Username for basic auth",
)
parser.add_argument(
"--password",
type=str,
default=None,
help="Password for basic auth",
)
args = parser.parse_args()
# Configure auth
if args.username and args.password:
auth = (args.username, args.password)
else:
auth = None
# Launch UI
ui.launch(server_name=args.address, server_port=args.port, auth=auth)
| [] |
2024-01-10 | zuoxingdong/lagom | baselines~openaies~experiment.py | from multiprocessing import Pool
import time
import numpy as np
import torch
import gym
from lagom import Logger
from lagom import EpisodeRunner
from lagom.transform import describe
from lagom.utils import CloudpickleWrapper
from lagom.utils import pickle_dump
from lagom.utils import tensorify
from lagom.utils import set_global_seeds
from lagom.experiment import Config
from lagom.experiment import Grid
from lagom.experiment import run_experiment
from lagom.envs import TimeStepEnv
from baselines.openaies.openaies import OpenAIES
from baselines.openaies.agent import Agent
config = Config(
{'log.freq': 10,
'checkpoint.num': 3,
'env.id': Grid(['Acrobot-v1', 'BipedalWalker-v2', 'Pendulum-v0', 'LunarLanderContinuous-v2']),
'nn.sizes': [64, 64],
# only for continuous control
'env.clip_action': True, # clip action within valid bound before step()
'agent.std0': 0.6, # initial std
'train.generations': 500, # total number of ES generations
'train.popsize': 32,
'train.worker_chunksize': 4, # must be divisible by popsize
'train.mu0': 0.0,
'train.std0': 1.0,
'train.lr': 1e-2,
'train.lr_decay': 1.0,
'train.min_lr': 1e-6,
'train.sigma_scheduler_args': [1.0, 0.01, 400, 0],
'train.antithetic': False,
'train.rank_transform': True
})
def make_env(config, seed, mode):
assert mode in ['train', 'eval']
env = gym.make(config['env.id'])
env.seed(seed)
env.observation_space.seed(seed)
env.action_space.seed(seed)
if config['env.clip_action'] and isinstance(env.action_space, gym.spaces.Box):
env = gym.wrappers.ClipAction(env) # TODO: use tanh to squash policy output when RescaleAction wrapper merged in gym
env = TimeStepEnv(env)
return env
def fitness(data):
torch.set_num_threads(1) # VERY IMPORTANT TO AVOID GETTING STUCK
config, seed, device, param = data
env = make_env(config, seed, 'train')
agent = Agent(config, env, device)
agent.from_vec(tensorify(param, 'cpu'))
runner = EpisodeRunner()
with torch.no_grad():
D = runner(agent, env, 10)
R = np.mean([sum(traj.rewards) for traj in D])
H = np.mean([traj.T for traj in D])
return R, H
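# Sketch of the per-candidate contract (values are illustrative): each worker
# receives one (config, seed, device, flat parameter vector) tuple and returns
# the mean episodic return and mean horizon over 10 episodes.
#   data = (config, seed, 'cpu', np.zeros(agent.num_params))
#   R, H = fitness(data)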
def run(config, seed, device, logdir):
set_global_seeds(seed)
torch.set_num_threads(1) # VERY IMPORTANT TO AVOID GETTING STUCK
print('Initializing...')
agent = Agent(config, make_env(config, seed, 'eval'), device)
es = OpenAIES([config['train.mu0']]*agent.num_params, config['train.std0'],
{'popsize': config['train.popsize'],
'seed': seed,
'sigma_scheduler_args': config['train.sigma_scheduler_args'],
'lr': config['train.lr'],
'lr_decay': config['train.lr_decay'],
'min_lr': config['train.min_lr'],
'antithetic': config['train.antithetic'],
'rank_transform': config['train.rank_transform']})
train_logs = []
checkpoint_count = 0
with Pool(processes=config['train.popsize']//config['train.worker_chunksize']) as pool:
print('Finish initialization. Training starts...')
for generation in range(config['train.generations']):
t0 = time.perf_counter()
solutions = es.ask()
data = [(config, seed, device, solution) for solution in solutions]
out = pool.map(CloudpickleWrapper(fitness), data, chunksize=config['train.worker_chunksize'])
Rs, Hs = zip(*out)
es.tell(solutions, [-R for R in Rs])
logger = Logger()
logger('generation', generation+1)
logger('num_seconds', round(time.perf_counter() - t0, 1))
logger('Returns', describe(Rs, axis=-1, repr_indent=1, repr_prefix='\n'))
logger('Horizons', describe(Hs, axis=-1, repr_indent=1, repr_prefix='\n'))
logger('fbest', es.result.fbest)
train_logs.append(logger.logs)
if generation == 0 or (generation+1) % config['log.freq'] == 0:
logger.dump(keys=None, index=0, indent=0, border='-'*50)
if (generation+1) >= int(config['train.generations']*(checkpoint_count/(config['checkpoint.num'] - 1))):
agent.from_vec(tensorify(es.result.xbest, 'cpu'))
agent.checkpoint(logdir, generation+1)
checkpoint_count += 1
pickle_dump(obj=train_logs, f=logdir/'train_logs', ext='.pkl')
return None
if __name__ == '__main__':
run_experiment(run=run,
config=config,
seeds=[1770966829, 1500925526, 2054191100],
log_dir='logs/default',
max_workers=7, # tune to fulfill computation power
chunksize=1,
use_gpu=False,
gpu_ids=None)
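# Editor's note (illustrative, not part of the original script): the Grid over
# 'env.id' expands the config into 4 environment variants; combined with the
# 3 seeds passed to run_experiment above, 12 independent runs are scheduled
# across max_workers=7 worker processes.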
| [] |
2024-01-10 | tehcoderer/OpenBBTerminal | openbb_terminal~helper_funcs.py | """Helper functions."""
__docformat__ = "numpy"
# pylint: disable=too-many-lines
# IMPORTS STANDARD LIBRARY
# IMPORTS STANDARD
import argparse
import inspect
import io
import json
import logging
import os
import random
import re
import shutil
import sys
import urllib.parse
import webbrowser
from datetime import (
date as d,
datetime,
timedelta,
)
from difflib import SequenceMatcher
from functools import lru_cache
from pathlib import Path
from typing import Dict, List, Optional, Tuple, Union
# IMPORTS THIRDPARTY
import iso8601
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import pandas.io.formats.format
import pandas_ta as ta
import pytz
import requests
import yfinance as yf
from holidays import US as us_holidays
from langchain.chat_models import ChatOpenAI
from llama_index import (
LLMPredictor,
PromptHelper,
ServiceContext,
SimpleDirectoryReader,
StorageContext,
VectorStoreIndex,
load_index_from_storage,
)
from pandas._config.config import get_option
from pandas.plotting import register_matplotlib_converters
from PIL import Image, ImageDraw
from rich.table import Table
from screeninfo import get_monitors
from openbb_terminal import OpenBBFigure, plots_backend
from openbb_terminal.core.config.paths import (
HOME_DIRECTORY,
MISCELLANEOUS_DIRECTORY,
)
from openbb_terminal.core.plots.plotly_ta.ta_class import PlotlyTA
from openbb_terminal.core.session.current_system import get_current_system
# IMPORTS INTERNAL
from openbb_terminal.core.session.current_user import get_current_user
from openbb_terminal.decorators import check_api_key
from openbb_terminal.rich_config import console
logger = logging.getLogger(__name__)
register_matplotlib_converters()
if (
get_current_user().preferences.PLOT_BACKEND is not None
and get_current_user().preferences.PLOT_BACKEND != "None"
):
matplotlib.use(get_current_user().preferences.PLOT_BACKEND)
NO_EXPORT = 0
EXPORT_ONLY_RAW_DATA_ALLOWED = 1
EXPORT_ONLY_FIGURES_ALLOWED = 2
EXPORT_BOTH_RAW_DATA_AND_FIGURES = 3
MENU_GO_BACK = 0
MENU_QUIT = 1
MENU_RESET = 2
GPT_INDEX_DIRECTORY = MISCELLANEOUS_DIRECTORY / "gpt_index/"
GPT_INDEX_VER = 0.4
# Command location path to be shown in the figures depending on watermark flag
command_location = ""
# pylint: disable=R1702,R0912
# pylint: disable=global-statement
def set_command_location(cmd_loc: str):
"""Set command location.
Parameters
----------
cmd_loc: str
Command location called by user
"""
if cmd_loc.split("/")[-1] == "hold":
return
global command_location # noqa
command_location = cmd_loc
def check_path(path: str) -> str:
"""Check that path file exists.
Parameters
----------
path: str
path of file
Returns
-------
str:
        Path of the file if it exists, otherwise an empty string
"""
# Just return empty path because this will be handled outside this function
if not path:
return ""
if path[0] == "~":
path = path.replace("~", HOME_DIRECTORY.as_posix())
# Return string of path if such relative path exists
if os.path.isfile(path):
return path
# Return string of path if an absolute path exists
if os.path.isfile("/" + path):
return f"/{path}"
logger.error("The path file '%s' does not exist.", path)
console.print(f"[red]The path file '{path}' does not exist.\n[/red]")
return ""
def parse_and_split_input(an_input: str, custom_filters: List) -> List[str]:
"""Filter and split the input queue.
Uses regex to filters command arguments that have forward slashes so that it doesn't
break the execution of the command queue.
Currently handles unix paths and sorting settings for screener menus.
Parameters
----------
an_input : str
User input as string
custom_filters : List
Additional regular expressions to match
Returns
-------
List[str]
Command queue as list
"""
# Make sure that the user can go back to the root when doing "/"
if an_input and an_input == "/":
an_input = "home"
# everything from ` -f ` to the next known extension
file_flag = r"(\ -f |\ --file )"
up_to = r".*?"
known_extensions = r"(\.(xlsx|csv|xls|tsv|json|yaml|ini|openbb|ipynb))"
unix_path_arg_exp = f"({file_flag}{up_to}{known_extensions})"
# Add custom expressions to handle edge cases of individual controllers
custom_filter = ""
for exp in custom_filters:
if exp is not None:
custom_filter += f"|{exp}"
del exp
slash_filter_exp = f"({unix_path_arg_exp}){custom_filter}"
filter_input = True
placeholders: Dict[str, str] = {}
while filter_input:
match = re.search(pattern=slash_filter_exp, string=an_input)
if match is not None:
placeholder = f"{{placeholder{len(placeholders)+1}}}"
placeholders[placeholder] = an_input[
match.span()[0] : match.span()[1] # noqa:E203
]
an_input = (
an_input[: match.span()[0]]
+ placeholder
+ an_input[match.span()[1] :] # noqa:E203
)
else:
filter_input = False
commands = an_input.split("/")
for command_num, command in enumerate(commands):
if command == commands[command_num] == commands[-1] == "":
return list(filter(None, commands))
matching_placeholders = [tag for tag in placeholders if tag in command]
if len(matching_placeholders) > 0:
for tag in matching_placeholders:
commands[command_num] = command.replace(tag, placeholders[tag])
return commands
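# Illustrative usage (editor's sketch, not part of the original module):
#   parse_and_split_input("stocks/load AAPL/candle", [])
#       -> ["stocks", "load AAPL", "candle"]
#   parse_and_split_input("load -f data/file.csv/candle", [])
#       -> ["load -f data/file.csv", "candle"]   (the file path is shielded from the split)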
def log_and_raise(error: Union[argparse.ArgumentTypeError, ValueError]) -> None:
"""Log and output an error."""
logger.error(str(error))
raise error
def similar(a: str, b: str) -> float:
"""Return a similarity float between string a and string b.
Parameters
----------
a: str
string a
b: str
string b
Returns
-------
float:
Ratio of similarity between two strings
"""
return SequenceMatcher(None, a, b).ratio()
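# Illustrative usage (editor's sketch, not part of the original module):
#   similar("apple", "appel")  -> 0.8
#   similar("TSLA", "tsla")    -> 0.0 (the comparison is case sensitive)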
def return_colored_value(value: str):
"""Return the string value with green, yellow, red or white color based on
whether the number is positive, negative, zero or other, respectively.
Parameters
----------
value: str
string to be checked
Returns
-------
value: str
string with color based on value of number if it exists
"""
values = re.findall(r"[-+]?(?:\d*\.\d+|\d+)", value)
# Finds exactly 1 number in the string
if len(values) == 1:
if float(values[0]) > 0:
return f"[green]{value}[/green]"
if float(values[0]) < 0:
return f"[red]{value}[/red]"
if float(values[0]) == 0:
return f"[yellow]{value}[/yellow]"
return f"{value}"
# pylint: disable=too-many-arguments
def print_rich_table( # noqa: PLR0912
df: pd.DataFrame,
show_index: bool = False,
title: str = "",
index_name: str = "",
headers: Optional[Union[List[str], pd.Index]] = None,
floatfmt: Union[str, List[str]] = ".2f",
show_header: bool = True,
automatic_coloring: bool = False,
columns_to_auto_color: Optional[List[str]] = None,
rows_to_auto_color: Optional[List[str]] = None,
export: bool = False,
print_to_console: bool = False,
limit: Optional[int] = 1000,
source: Optional[str] = None,
columns_keep_types: Optional[List[str]] = None,
):
"""Prepare a table from df in rich.
Parameters
----------
df: pd.DataFrame
Dataframe to turn into table
show_index: bool
Whether to include index
title: str
Title for table
index_name : str
Title for index column
headers: List[str]
Titles for columns
floatfmt: Union[str, List[str]]
Float number formatting specs as string or list of strings. Defaults to ".2f"
show_header: bool
Whether to show the header row.
automatic_coloring: bool
Automatically color a table based on positive and negative values
columns_to_auto_color: List[str]
Columns to automatically color
rows_to_auto_color: List[str]
Rows to automatically color
export: bool
Whether we are exporting the table to a file. If so, we don't want to print it.
limit: Optional[int]
Limit the number of rows to show.
print_to_console: bool
Whether to print the table to the console. If False and interactive mode is
enabled, the table will be displayed in a new window. Otherwise, it will print to the
console.
source: Optional[str]
Source of the table. If provided, it will be displayed in the header of the table.
columns_keep_types: Optional[List[str]]
Columns to keep their types, i.e. not convert to numeric
"""
if export:
return
current_user = get_current_user()
enable_interactive = (
current_user.preferences.USE_INTERACTIVE_DF and plots_backend().isatty
)
# Make a copy of the dataframe to avoid SettingWithCopyWarning
df = df.copy()
show_index = not isinstance(df.index, pd.RangeIndex) and show_index
# convert non-str that are not timestamp or int into str
# eg) praw.models.reddit.subreddit.Subreddit
for col in df.columns:
if columns_keep_types is not None and col in columns_keep_types:
continue
try:
if not any(
isinstance(df[col].iloc[x], pd.Timestamp)
for x in range(min(10, len(df)))
):
df[col] = pd.to_numeric(df[col], errors="ignore")
except (ValueError, TypeError):
df[col] = df[col].astype(str)
def _get_headers(_headers: Union[List[str], pd.Index]) -> List[str]:
"""Check if headers are valid and return them."""
output = _headers
if isinstance(_headers, pd.Index):
output = list(_headers)
if len(output) != len(df.columns):
log_and_raise(
ValueError("Length of headers does not match length of DataFrame")
)
return output
if enable_interactive and not print_to_console:
df_outgoing = df.copy()
# If headers are provided, use them
if headers is not None:
# We check if headers are valid
df_outgoing.columns = _get_headers(headers)
if show_index and index_name not in df_outgoing.columns:
# If index name is provided, we use it
df_outgoing.index.name = index_name or "Index"
df_outgoing = df_outgoing.reset_index()
for col in df_outgoing.columns:
if col == "":
df_outgoing = df_outgoing.rename(columns={col: " "})
plots_backend().send_table(
df_table=df_outgoing,
title=title,
source=source, # type: ignore
theme=current_user.preferences.TABLE_STYLE,
)
return
df = df.copy() if not limit else df.copy().iloc[:limit]
if automatic_coloring:
if columns_to_auto_color:
for col in columns_to_auto_color:
# checks whether column exists
if col in df.columns:
df[col] = df[col].apply(lambda x: return_colored_value(str(x)))
if rows_to_auto_color:
for row in rows_to_auto_color:
# checks whether row exists
if row in df.index:
df.loc[row] = df.loc[row].apply(
lambda x: return_colored_value(str(x))
)
if columns_to_auto_color is None and rows_to_auto_color is None:
df = df.applymap(lambda x: return_colored_value(str(x)))
if current_user.preferences.USE_TABULATE_DF:
table = Table(title=title, show_lines=True, show_header=show_header)
if show_index:
table.add_column(index_name)
if headers is not None:
headers = _get_headers(headers)
for header in headers:
table.add_column(str(header))
else:
for column in df.columns:
table.add_column(str(column))
if isinstance(floatfmt, list) and len(floatfmt) != len(df.columns):
log_and_raise(
ValueError(
"Length of floatfmt list does not match length of DataFrame columns."
)
)
if isinstance(floatfmt, str):
floatfmt = [floatfmt for _ in range(len(df.columns))]
for idx, values in zip(df.index.tolist(), df.values.tolist()):
# remove hour/min/sec from timestamp index - Format: YYYY-MM-DD # make better
row_idx = [str(idx)] if show_index else []
row_idx += [
str(x)
if not isinstance(x, float) and not isinstance(x, np.float64)
else (
f"{x:{floatfmt[idx]}}"
if isinstance(floatfmt, list)
else (
f"{x:.2e}" if 0 < abs(float(x)) <= 0.0001 else f"{x:floatfmt}"
)
)
for idx, x in enumerate(values)
]
table.add_row(*row_idx)
console.print(table)
else:
console.print(df.to_string(col_space=0))
def check_int_range(mini: int, maxi: int):
"""Check if argparse argument is an int between 2 values.
Parameters
----------
mini: int
Min value to compare
maxi: int
Max value to compare
Returns
-------
int_range_checker:
        Function that checks whether an integer lies within the [mini, maxi] range
"""
# Define the function with default arguments
def int_range_checker(num: int) -> int:
"""Check if int is between a high and low value.
Parameters
----------
num: int
Input integer
Returns
----------
num: int
Input number if conditions are met
Raises
------
argparse.ArgumentTypeError
Input number not between min and max values
"""
num = int(num)
if num < mini or num > maxi:
log_and_raise(
argparse.ArgumentTypeError(f"Argument must be in range [{mini},{maxi}]")
)
return num
# Return function handle to checking function
return int_range_checker
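# Illustrative usage (editor's sketch, not part of the original module): the
# returned checker plugs directly into argparse, e.g.
#   parser.add_argument("-l", "--limit", type=check_int_range(1, 100), default=10)
# where "parser" is an argparse.ArgumentParser instance defined elsewhere.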
def check_non_negative(value) -> int:
"""Argparse type to check non negative int."""
new_value = int(value)
if new_value < 0:
log_and_raise(argparse.ArgumentTypeError(f"{value} is negative"))
return new_value
def check_terra_address_format(address: str) -> str:
"""Validate that terra account address has proper format.
Example: ^terra1[a-z0-9]{38}$
Parameters
----------
address: str
terra blockchain account address
Returns
-------
str
Terra blockchain address or raise argparse exception
"""
pattern = re.compile(r"^terra1[a-z0-9]{38}$")
if not pattern.match(address):
log_and_raise(
argparse.ArgumentTypeError(
f"Terra address: {address} has invalid format. Valid format: ^terra1[a-z0-9]{{38}}$"
)
)
return address
def check_non_negative_float(value) -> float:
"""Argparse type to check non negative int."""
new_value = float(value)
if new_value < 0:
log_and_raise(argparse.ArgumentTypeError(f"{value} is negative"))
return new_value
def check_positive_list(value) -> List[int]:
"""Argparse type to return list of positive ints."""
list_of_nums = value.split(",")
list_of_pos = []
for a_value in list_of_nums:
new_value = int(a_value)
if new_value <= 0:
log_and_raise(
argparse.ArgumentTypeError(f"{value} is an invalid positive int value")
)
list_of_pos.append(new_value)
return list_of_pos
def check_positive_float_list(value) -> List[float]:
"""Argparse type to return list of positive floats."""
list_of_nums = value.split(",")
list_of_pos = []
for a_value in list_of_nums:
new_value = float(a_value)
if new_value <= 0:
log_and_raise(
argparse.ArgumentTypeError(f"{value} is an invalid positive int value")
)
list_of_pos.append(new_value)
return list_of_pos
def check_positive(value) -> int:
"""Argparse type to check positive int."""
new_value = int(value)
if new_value <= 0:
log_and_raise(
argparse.ArgumentTypeError(f"{value} is an invalid positive int value")
)
return new_value
def check_indicators(string: str) -> List[str]:
"""Check if indicators are valid."""
ta_cls = PlotlyTA()
choices = sorted(
[c.name.replace("plot_", "") for c in ta_cls if c.name != "plot_ma"]
+ ta_cls.ma_mode
)
choices_print = (
f"{'`, `'.join(choices[:10])}`\n `{'`, `'.join(choices[10:20])}"
f"`\n `{'`, `'.join(choices[20:])}"
)
strings = string.split(",")
for s in strings:
if s not in choices:
raise argparse.ArgumentTypeError(
f"\nInvalid choice: {s}, choose from \n `{choices_print}`",
)
return strings
def check_indicator_parameters(args: str, _help: bool = False) -> str:
"""Check if indicators parameters are valid."""
ta_cls = PlotlyTA()
indicators_dict: dict = {}
regex = re.compile(r"([a-zA-Z]+)\[([0-9.,]*)\]")
no_params_regex = re.compile(r"([a-zA-Z]+)")
matches = regex.findall(args)
no_params_matches = no_params_regex.findall(args)
indicators = [m[0] for m in matches]
for match in no_params_matches:
if match not in indicators:
matches.append((match, ""))
if _help:
console.print(
"""[yellow]To pass custom parameters to indicators:[/]
[green]Example:
-i macd[12,26,9],rsi[14],sma[20,50]
-i macd,rsi,sma (uses default parameters)
[yellow]Would pass the following to the indicators:[/]
[green]macd=dict(fast=12, slow=26, signal=9)
rsi=dict(length=14)
sma=dict(length=[20,50])
They must be in the same order as the function parameters.[/]\n"""
)
pop_keys = ["close", "high", "low", "open", "open_", "volume", "talib", "return"]
if matches:
check_indicators(",".join([m[0] for m in matches]))
for match in matches:
indicator, args = match
indicators_dict.setdefault(indicator, {})
if indicator in ["fib", "srlines", "demark", "clenow"]:
if _help:
console.print(
f"[yellow]{indicator}:[/]\n{'':^4}[green]Parameters: None[/]"
)
continue
fullspec = inspect.getfullargspec(getattr(ta, indicator))
kwargs = list(set(fullspec.args) - set(pop_keys))
kwargs.sort(key=fullspec.args.index)
if _help:
console.print(
f"[yellow]{indicator}:[/]\n{'':^4}[green]Parameters: {', '.join(kwargs)}[/]"
)
if indicator in ta_cls.ma_mode:
indicators_dict[indicator]["length"] = check_positive_list(args)
continue
for i, arg in enumerate(args.split(",")):
if arg and len(kwargs) > i:
indicators_dict[indicator][kwargs[i]] = (
float(arg) if "." in arg else int(arg)
)
return json.dumps(indicators_dict)
if not matches:
raise argparse.ArgumentTypeError(
f"Invalid indicator arguments: {args}. \n Example: -i macd[12,26,9],rsi[14]"
)
return args
def check_positive_float(value) -> float:
"""Argparse type to check positive float."""
new_value = float(value)
if new_value <= 0:
log_and_raise(
argparse.ArgumentTypeError(f"{value} is not a positive float value")
)
return new_value
def check_percentage_range(num) -> float:
"""Check if float is between 0 and 100. If so, return it.
Parameters
----------
num: float
Input float
Returns
-------
num: float
Input number if conditions are met
Raises
------
argparse.ArgumentTypeError
Input number not between min and max values
"""
num = float(num)
maxi = 100.0
mini = 0.0
if num <= mini or num >= maxi:
log_and_raise(argparse.ArgumentTypeError("Value must be between 0 and 100"))
return num
def check_proportion_range(num) -> float:
"""Check if float is between 0 and 1. If so, return it.
Parameters
----------
num: float
Input float
Returns
-------
num: float
Input number if conditions are met
Raises
----------
argparse.ArgumentTypeError
Input number not between min and max values
"""
num = float(num)
maxi = 1.0
mini = 0.0
if num < mini or num > maxi:
log_and_raise(argparse.ArgumentTypeError("Value must be between 0 and 1"))
return num
def valid_date_in_past(s: str) -> datetime:
"""Argparse type to check date is in valid format."""
try:
delta = datetime.now() - datetime.strptime(s, "%Y-%m-%d")
if delta.days < 1:
log_and_raise(
argparse.ArgumentTypeError(
f"Not a valid date: {s}. Must be earlier than today"
)
)
return datetime.strptime(s, "%Y-%m-%d")
except ValueError as value_error:
logging.exception(str(value_error))
raise argparse.ArgumentTypeError(f"Not a valid date: {s}") from value_error
def check_list_dates(str_dates: str) -> List[datetime]:
"""Argparse type to check list of dates provided have a valid format.
Parameters
----------
str_dates: str
string with dates separated by ","
Returns
-------
list_dates: List[datetime]
List of valid dates
"""
list_dates = list()
if str_dates:
if "," in str_dates:
for dt_marker in str_dates.split(","):
list_dates.append(valid_date(dt_marker))
else:
list_dates.append(valid_date(str_dates))
return list_dates
def valid_date(s: str) -> datetime:
"""Argparse type to check date is in valid format."""
try:
return datetime.strptime(s, "%Y-%m-%d")
except ValueError as value_error:
logging.exception(str(value_error))
raise argparse.ArgumentTypeError(f"Not a valid date: {s}") from value_error
def is_valid_date(s: str) -> bool:
"""Check if date is in valid format."""
try:
datetime.strptime(s, "%Y-%m-%d")
return True
except ValueError:
return False
def valid_repo(repo: str) -> str:
"""Argparse type to check github repo is in valid format."""
result = re.search(r"^[a-zA-Z0-9-_.]+\/[a-zA-Z0-9-_.]+$", repo) # noqa: W605
if not result:
log_and_raise(
argparse.ArgumentTypeError(
f"{repo} is not a valid repo. Valid repo: org/repo"
)
)
return repo
def valid_hour(hr: str) -> int:
"""Argparse type to check hour is valid with 24-hour notation."""
new_hr = int(hr)
if (new_hr < 0) or (new_hr > 24):
log_and_raise(
argparse.ArgumentTypeError(f"{hr} doesn't follow 24-hour notion.")
)
return new_hr
def lower_str(string: str) -> str:
"""Convert string to lowercase."""
return string.lower()
def us_market_holidays(years) -> list:
"""Get US market holidays."""
if isinstance(years, int):
years = [
years,
]
# https://www.nyse.com/markets/hours-calendars
market_holidays = [
"Martin Luther King Jr. Day",
"Washington's Birthday",
"Memorial Day",
"Independence Day",
"Labor Day",
"Thanksgiving",
"Christmas Day",
]
# http://www.maa.clell.de/StarDate/publ_holidays.html
good_fridays = {
2010: "2010-04-02",
2011: "2011-04-22",
2012: "2012-04-06",
2013: "2013-03-29",
2014: "2014-04-18",
2015: "2015-04-03",
2016: "2016-03-25",
2017: "2017-04-14",
2018: "2018-03-30",
2019: "2019-04-19",
2020: "2020-04-10",
2021: "2021-04-02",
2022: "2022-04-15",
2023: "2023-04-07",
2024: "2024-03-29",
2025: "2025-04-18",
2026: "2026-04-03",
2027: "2027-03-26",
2028: "2028-04-14",
2029: "2029-03-30",
2030: "2030-04-19",
}
market_and_observed_holidays = market_holidays + [
holiday + " (Observed)" for holiday in market_holidays
]
all_holidays = us_holidays(years=years)
valid_holidays = [
date
for date in list(all_holidays)
if all_holidays[date] in market_and_observed_holidays
]
for year in years:
new_Year = datetime.strptime(f"{year}-01-01", "%Y-%m-%d")
if new_Year.weekday() != 5: # ignore saturday
valid_holidays.append(new_Year.date())
if new_Year.weekday() == 6: # add monday for Sunday
valid_holidays.append(new_Year.date() + timedelta(1))
for year in years:
valid_holidays.append(datetime.strptime(good_fridays[year], "%Y-%m-%d").date())
return valid_holidays
def lambda_long_number_format(num, round_decimal=3) -> Union[str, int, float]:
"""Format a long number."""
if get_current_user().preferences.USE_INTERACTIVE_DF:
return num
if num == float("inf"):
return "inf"
if isinstance(num, float):
magnitude = 0
while abs(num) >= 1000:
magnitude += 1
num /= 1000.0
string_fmt = f".{round_decimal}f"
num_str = int(num) if num.is_integer() else f"{num:{string_fmt}}"
return f"{num_str} {' KMBTP'[magnitude]}".strip()
if isinstance(num, int):
num = str(num)
if (
isinstance(num, str)
and num.lstrip("-").isdigit()
and not num.lstrip("-").startswith("0")
and not is_valid_date(num)
):
num = int(num)
num /= 1.0
magnitude = 0
while abs(num) >= 1000:
magnitude += 1
num /= 1000.0
string_fmt = f".{round_decimal}f"
num_str = int(num) if num.is_integer() else f"{num:{string_fmt}}"
return f"{num_str} {' KMBTP'[magnitude]}".strip()
return num
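# Illustrative behaviour (editor's sketch, not part of the original module),
# assuming the interactive-dataframe preference is disabled:
#   lambda_long_number_format(1_234_567)     -> "1.235 M"
#   lambda_long_number_format(987)           -> "987"
#   lambda_long_number_format(float("inf"))  -> "inf"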
def revert_lambda_long_number_format(num_str: str) -> Union[float, str]:
"""
Revert the formatting of a long number if the input is a formatted number, otherwise return the input as is.
Parameters
----------
num_str : str
The number to remove the formatting.
Returns
-------
Union[float, str]
The number as float (with no formatting) or the input as is.
"""
magnitude_dict = {
"K": 1000,
"M": 1000000,
"B": 1000000000,
"T": 1000000000000,
"P": 1000000000000000,
}
# Ensure the input is a string and not empty
if not num_str or not isinstance(num_str, str):
return num_str
num_as_list = num_str.strip().split()
# If the input string is a number parse it as float
if (
len(num_as_list) == 1
and num_as_list[0].replace(".", "").replace("-", "").isdigit()
and not is_valid_date(num_str)
):
return float(num_str)
# If the input string is a formatted number with magnitude
if (
len(num_as_list) == 2
and num_as_list[1] in magnitude_dict
and num_as_list[0].replace(".", "").replace("-", "").isdigit()
):
num, unit = num_as_list
magnitude = magnitude_dict.get(unit)
if magnitude:
return float(num) * magnitude
# Return the input string as is if it's not a formatted number
return num_str
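# Illustrative behaviour (editor's sketch, not part of the original module):
#   revert_lambda_long_number_format("1.5 M")   -> 1500000.0
#   revert_lambda_long_number_format("-2.5 K")  -> -2500.0
#   revert_lambda_long_number_format("hello")   -> "hello" (returned unchanged)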
def lambda_long_number_format_y_axis(df, y_column, ax):
"""Format long number that goes onto Y axis."""
max_values = df[y_column].values.max()
magnitude = 0
while abs(max_values) >= 1000:
magnitude += 1
max_values /= 1000.0
magnitude_sym = " KMBTP"[magnitude]
# Second y label axis -
if magnitude_sym == " ":
ax[2].set_ylabel(f"{y_column}")
else:
ax[2].set_ylabel(f"{y_column} [{magnitude_sym}]")
divider_map = {" ": 1, "K": 1000, "M": 1000000, "B": 1000000000}
divider = divider_map[magnitude_sym]
ax[2].get_yaxis().set_major_formatter(
matplotlib.ticker.FuncFormatter(lambda x, _: int(x / divider))
)
def lambda_clean_data_values_to_float(val: str) -> float:
"""Clean data to float based on string ending."""
# Remove any leading or trailing parentheses and spaces
val = val.strip("( )")
if val == "-":
val = "0"
# Convert percentage to decimal
if val.endswith("%"):
return float(val[:-1]) / 100.0
if val.endswith("B"):
return float(val[:-1]) * 1_000_000_000
if val.endswith("M"):
return float(val[:-1]) * 1_000_000
if val.endswith("K"):
return float(val[:-1]) * 1000
return float(val)
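# Illustrative behaviour (editor's sketch, not part of the original module):
#   lambda_clean_data_values_to_float("(4.5%)")  -> 0.045
#   lambda_clean_data_values_to_float("1.5M")    -> 1500000.0
#   lambda_clean_data_values_to_float("-")       -> 0.0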
def lambda_int_or_round_float(x) -> str:
"""Format int or round float."""
# If the data is inf, -inf, or NaN then simply return '~' because it is either too
# large, too small, or we do not have data to display for it
if x in (np.inf, -np.inf, np.nan):
return " " + "~"
if (x - int(x) < -sys.float_info.epsilon) or (x - int(x) > sys.float_info.epsilon):
return " " + str(round(x, 2))
return " " + str(int(x))
def divide_chunks(data, n):
"""Split into chunks."""
# looping till length of data
for i in range(0, len(data), n):
yield data[i : i + n] # noqa: E203
def get_next_stock_market_days(last_stock_day, n_next_days) -> list:
"""Get the next stock market day.
Checks against weekends and holidays.
"""
n_days = 0
l_pred_days = []
years: list = []
holidays: list = []
if isinstance(last_stock_day, datetime):
while n_days < n_next_days:
last_stock_day += timedelta(hours=24)
year = last_stock_day.date().year
if year not in years:
years.append(year)
holidays += us_market_holidays(year)
# Check if it is a weekend
if last_stock_day.date().weekday() > 4:
continue
# Check if it is a holiday
            if last_stock_day.date() in holidays:
continue
# Otherwise stock market is open
n_days += 1
l_pred_days.append(last_stock_day)
else:
while n_days < n_next_days:
l_pred_days.append(last_stock_day + 1 + n_days)
n_days += 1
return l_pred_days
def is_intraday(df: pd.DataFrame) -> bool:
"""Check if the data granularity is intraday.
Parameters
----------
df : pd.DataFrame
Price data
Returns
-------
bool
True if data is intraday
"""
granularity = df.index[1] - df.index[0]
intraday = not granularity >= timedelta(days=1)
return intraday
def reindex_dates(df: pd.DataFrame) -> pd.DataFrame:
"""Reindex dataframe to exclude non-trading days.
Resets the index of a df to an integer and prepares the 'date' column to become
x tick labels on a plot.
Parameters
----------
df : pd.DataFrame
Source dataframe
Returns
-------
pd.DataFrame
Reindexed dataframe
"""
date_format = "%b %d %H:%M" if is_intraday(df) else "%Y-%m-%d"
reindexed_df = df.reset_index()
reindexed_df["date"] = reindexed_df["date"].dt.strftime(date_format)
return reindexed_df
def get_data(tweet):
"""Get twitter data from API request."""
if "+" in tweet["created_at"]:
s_datetime = tweet["created_at"].split(" +")[0]
else:
s_datetime = iso8601.parse_date(tweet["created_at"]).strftime(
"%Y-%m-%d %H:%M:%S"
)
s_text = tweet["full_text"] if "full_text" in tweet else tweet["text"]
return {"created_at": s_datetime, "text": s_text}
def clean_tweet(tweet: str, symbol: str) -> str:
"""Clean tweets to be fed to sentiment model."""
whitespace = re.compile(r"\s+")
web_address = re.compile(r"(?i)http(s):\/\/[a-z0-9.~_\-\/]+")
ticker = re.compile(rf"(?i)@{symbol}(?=\b)")
user = re.compile(r"(?i)@[a-z0-9_]+")
tweet = whitespace.sub(" ", tweet)
tweet = web_address.sub("", tweet)
tweet = ticker.sub(symbol, tweet)
tweet = user.sub("", tweet)
return tweet
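# Illustrative behaviour (editor's sketch, not part of the original module): for
#   clean_tweet("Huge move by @AAPL today https://t.co/abc123 @sometrader", "AAPL")
# runs of whitespace are collapsed, the URL and the "@sometrader" handle are
# stripped, and "@AAPL" is normalised to "AAPL".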
def get_user_agent() -> str:
"""Get a not very random user agent."""
user_agent_strings = [
"Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10.10; rv:86.1) Gecko/20100101 Firefox/86.1",
"Mozilla/5.0 (Windows NT 6.1; WOW64; rv:86.1) Gecko/20100101 Firefox/86.1",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10.10; rv:82.1) Gecko/20100101 Firefox/82.1",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10.13; rv:86.0) Gecko/20100101 Firefox/86.0",
"Mozilla/5.0 (Windows NT 10.0; WOW64; rv:86.0) Gecko/20100101 Firefox/86.0",
"Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10.10; rv:83.0) Gecko/20100101 Firefox/83.0",
"Mozilla/5.0 (Windows NT 6.1; WOW64; rv:84.0) Gecko/20100101 Firefox/84.0",
]
return random.choice(user_agent_strings) # nosec # noqa: S311
def text_adjustment_init(self):
"""Adjust text monkey patch for Pandas."""
self.ansi_regx = re.compile(r"\x1B[@-_][0-?]*[ -/]*[@-~]")
self.encoding = get_option("display.encoding")
def text_adjustment_len(self, text):
"""Get the length of the text adjustment."""
# return compat.strlen(self.ansi_regx.sub("", text), encoding=self.encoding)
return len(self.ansi_regx.sub("", text))
def text_adjustment_justify(self, texts, max_len, mode="right"):
"""Apply 'Justify' text alignment."""
justify = (
str.ljust
if (mode == "left")
else str.rjust
if (mode == "right")
else str.center
)
out = []
for s in texts:
escapes = self.ansi_regx.findall(s)
if len(escapes) == 2:
out.append(
escapes[0].strip()
+ justify(self.ansi_regx.sub("", s), max_len)
+ escapes[1].strip()
)
else:
out.append(justify(s, max_len))
return out
# pylint: disable=unused-argument
def text_adjustment_join_unicode(self, lines, sep=""):
"""Join Unicode."""
try:
return sep.join(lines)
except UnicodeDecodeError:
# sep = compat.text_type(sep)
return sep.join([x.decode("utf-8") if isinstance(x, str) else x for x in lines])
# pylint: disable=unused-argument
def text_adjustment_adjoin(self, space, *lists, **kwargs):
"""Join text."""
# Add space for all but the last column:
pads = ([space] * (len(lists) - 1)) + [0]
max_col_len = max(len(col) for col in lists)
new_cols = []
for col, pad in zip(lists, pads):
width = max(self.len(s) for s in col) + pad
c = self.justify(col, width, mode="left")
# Add blank cells to end of col if needed for different col lens:
if len(col) < max_col_len:
c.extend([" " * width] * (max_col_len - len(col)))
new_cols.append(c)
rows = [self.join_unicode(row_tup) for row_tup in zip(*new_cols)]
return self.join_unicode(rows, sep="\n")
# https://github.com/pandas-dev/pandas/issues/18066#issuecomment-522192922
def patch_pandas_text_adjustment():
"""Set pandas text adjustment settings."""
pandas.io.formats.format.TextAdjustment.__init__ = text_adjustment_init
pandas.io.formats.format.TextAdjustment.len = text_adjustment_len
pandas.io.formats.format.TextAdjustment.justify = text_adjustment_justify
pandas.io.formats.format.TextAdjustment.join_unicode = text_adjustment_join_unicode
pandas.io.formats.format.TextAdjustment.adjoin = text_adjustment_adjoin
def lambda_financials_colored_values(val: str) -> str:
"""Add a color to a value."""
# We don't want to do the color stuff in interactive mode
if get_current_user().preferences.USE_INTERACTIVE_DF:
return val
if val == "N/A" or str(val) == "nan":
val = "[yellow]N/A[/yellow]"
elif sum(c.isalpha() for c in val) < 2:
if "%" in val and "-" in val or "%" not in val and "(" in val:
val = f"[red]{val}[/red]"
elif "%" in val:
val = f"[green]{val}[/green]"
return val
def check_ohlc(type_ohlc: str) -> str:
"""Check that data is in ohlc."""
if bool(re.match("^[ohlca]+$", type_ohlc)):
return type_ohlc
raise argparse.ArgumentTypeError("The type specified is not recognized")
def lett_to_num(word: str) -> str:
"""Match ohlca to integers."""
replacements = [("o", "1"), ("h", "2"), ("l", "3"), ("c", "4"), ("a", "5")]
for a, b in replacements:
word = word.replace(a, b)
return word
AVAILABLE_FLAIRS = {
":openbb": "(🦋)",
":bug": "(🐛)",
":rocket": "(🚀)",
":diamond": "(💎)",
":stars": "(✨)",
":baseball": "(⚾)",
":boat": "(⛵)",
":phone": "(☎)",
":mercury": "(☿)",
":hidden": "",
":sun": "(☼)",
":moon": "(☾)",
":nuke": "(☢)",
":hazard": "(☣)",
":tunder": "(☈)",
":king": "(♔)",
":queen": "(♕)",
":knight": "(♘)",
":recycle": "(♻)",
":scales": "(⚖)",
":ball": "(⚽)",
":golf": "(⛳)",
":piece": "(☮)",
":yy": "(☯)",
}
def get_flair() -> str:
"""Get a flair icon."""
current_user = get_current_user() # pylint: disable=redefined-outer-name
current_flair = str(current_user.preferences.FLAIR)
flair = AVAILABLE_FLAIRS.get(current_flair, current_flair)
if (
current_user.preferences.USE_DATETIME
and get_user_timezone_or_invalid() != "INVALID"
):
dtime = datetime.now(pytz.timezone(get_user_timezone())).strftime(
"%Y %b %d, %H:%M"
)
# if there is no flair, don't add an extra space after the time
if flair == "":
return f"{dtime}"
return f"{dtime} {flair}"
return flair
def is_timezone_valid(user_tz: str) -> bool:
"""Check whether user timezone is valid.
Parameters
----------
user_tz: str
Timezone to check for validity
Returns
-------
bool
True if timezone provided is valid
"""
return user_tz in pytz.all_timezones
def get_user_timezone() -> str:
"""Get user timezone if it is a valid one.
Returns
-------
str
user timezone based on .env file
"""
return get_current_user().preferences.TIMEZONE
def get_user_timezone_or_invalid() -> str:
"""Get user timezone if it is a valid one.
Returns
-------
str
user timezone based on timezone.openbb file or INVALID
"""
user_tz = get_user_timezone()
if is_timezone_valid(user_tz):
return f"{user_tz}"
return "INVALID"
def str_to_bool(value) -> bool:
"""Match a string to a boolean value."""
if isinstance(value, bool):
return value
if value.lower() in {"false", "f", "0", "no", "n"}:
return False
if value.lower() in {"true", "t", "1", "yes", "y"}:
return True
raise ValueError(f"{value} is not a valid boolean value")
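# Illustrative usage (editor's sketch, not part of the original module):
#   str_to_bool("Yes") -> True, str_to_bool("0") -> False,
#   str_to_bool("maybe") raises ValueError.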
def get_screeninfo():
"""Get screeninfo."""
try:
screens = get_monitors() # Get all available monitors
except Exception:
return None
if screens:
current_user = get_current_user()
if (
len(screens) - 1 < current_user.preferences.MONITOR
): # Check to see if chosen monitor is detected
monitor = 0
console.print(
f"Could not locate monitor {current_user.preferences.MONITOR}, using primary monitor."
)
else:
monitor = current_user.preferences.MONITOR
main_screen = screens[monitor] # Choose what monitor to get
return (main_screen.width, main_screen.height)
return None
def plot_autoscale():
"""Autoscale plot."""
current_user = get_current_user()
screen_info = get_screeninfo()
if current_user.preferences.USE_PLOT_AUTOSCALING and screen_info:
x, y = screen_info # Get screen size
# account for ultrawide monitors
if x / y > 1.5:
x = x * 0.4
x = ((x) * current_user.preferences.PLOT_WIDTH_PERCENTAGE * 10**-2) / (
current_user.preferences.PLOT_DPI
) # Calculate width
if current_user.preferences.PLOT_HEIGHT_PERCENTAGE == 100: # If full height
y = y - 60 # Remove the height of window toolbar
y = ((y) * current_user.preferences.PLOT_HEIGHT_PERCENTAGE * 10**-2) / (
current_user.preferences.PLOT_DPI
)
else: # If not autoscale, use size defined in config_plot.py
x = current_user.preferences.PLOT_WIDTH / (current_user.preferences.PLOT_DPI)
y = current_user.preferences.PLOT_HEIGHT / (current_user.preferences.PLOT_DPI)
return x, y
def get_last_time_market_was_open(dt):
"""Get last time the US market was open."""
# Check if it is a weekend
if dt.date().weekday() > 4:
dt = get_last_time_market_was_open(dt - timedelta(hours=24))
# Check if it is a holiday
if dt.strftime("%Y-%m-%d") in us_holidays():
dt = get_last_time_market_was_open(dt - timedelta(hours=24))
dt = dt.replace(hour=21, minute=0, second=0)
return dt
def check_file_type_saved(valid_types: Optional[List[str]] = None):
"""Provide valid types for the user to be able to select.
Parameters
----------
valid_types: List[str]
List of valid types to export data
Returns
-------
check_filenames: Optional[List[str]]
Function that returns list of filenames to export data
"""
def check_filenames(filenames: str = "") -> str:
"""Check if filenames are valid.
Parameters
----------
filenames: str
filenames to be saved separated with comma
Returns
----------
str
valid filenames separated with comma
"""
if not filenames or not valid_types:
return ""
valid_filenames = list()
for filename in filenames.split(","):
if filename.endswith(tuple(valid_types)):
valid_filenames.append(filename)
else:
console.print(
f"[red]Filename '{filename}' provided is not valid!\nPlease use one of the following file types:"
f"{','.join(valid_types)}[/red]\n"
)
return ",".join(valid_filenames)
return check_filenames
def compose_export_path(func_name: str, dir_path: str) -> Path:
"""Compose export path for data from the terminal.
Creates a path to a folder and a filename based on conditions.
Parameters
----------
func_name : str
Name of the command that invokes this function
dir_path : str
Path of directory from where this function is called
Returns
-------
Path
Path variable containing the path of the exported file
"""
now = datetime.now()
# Resolving all symlinks and also normalizing path.
resolve_path = Path(dir_path).resolve()
# Getting the directory names from the path. Instead of using split/replace (Windows doesn't like that)
# check if this is done in a main context to avoid saving with openbb_terminal
if resolve_path.parts[-2] == "openbb_terminal":
path_cmd = f"{resolve_path.parts[-1]}"
else:
path_cmd = f"{resolve_path.parts[-2]}_{resolve_path.parts[-1]}"
default_filename = f"{now.strftime('%Y%m%d_%H%M%S')}_{path_cmd}_{func_name}"
full_path = get_current_user().preferences.USER_EXPORTS_DIRECTORY / default_filename
return full_path
def ask_file_overwrite(file_path: Path) -> Tuple[bool, bool]:
"""Helper to provide a prompt for overwriting existing files.
Returns two values, the first is a boolean indicating if the file exists and the
second is a boolean indicating if the user wants to overwrite the file.
"""
# Jeroen asked for a flag to overwrite no matter what
current_user = get_current_user()
if current_user.preferences.FILE_OVERWRITE:
return False, True
if get_current_system().TEST_MODE:
return False, True
if file_path.exists():
overwrite = input("\nFile already exists. Overwrite? [y/n]: ").lower()
if overwrite == "y":
file_path.unlink(missing_ok=True)
# File exists and user wants to overwrite
return True, True
# File exists and user does not want to overwrite
return True, False
# File does not exist
return False, True
# This is a false positive on pylint and being tracked in pylint #3060
# pylint: disable=abstract-class-instantiated
def export_data(
export_type: str,
dir_path: str,
func_name: str,
df: pd.DataFrame = pd.DataFrame(),
sheet_name: Optional[str] = None,
figure: Optional[OpenBBFigure] = None,
margin: bool = True,
) -> None:
"""Export data to a file.
Parameters
----------
export_type : str
Type of export between: csv,json,xlsx,xls
dir_path : str
Path of directory from where this function is called
func_name : str
Name of the command that invokes this function
df : pd.Dataframe
Dataframe of data to save
sheet_name : str
If provided. The name of the sheet to save in excel file
figure : Optional[OpenBBFigure]
Figure object to save as image file
margin : bool
Automatically adjust subplot parameters to give specified padding.
"""
if export_type:
saved_path = compose_export_path(func_name, dir_path).resolve()
saved_path.parent.mkdir(parents=True, exist_ok=True)
for exp_type in export_type.split(","):
# In this scenario the path was provided, e.g. --export pt.csv, pt.jpg
if "." in exp_type:
saved_path = saved_path.with_name(exp_type)
# In this scenario we use the default filename
else:
if ".OpenBB_openbb_terminal" in saved_path.name:
saved_path = saved_path.with_name(
saved_path.name.replace(
".OpenBB_openbb_terminal", "OpenBBTerminal"
)
)
saved_path = saved_path.with_suffix(f".{exp_type}")
exists, overwrite = False, False
is_xlsx = exp_type.endswith("xlsx")
if sheet_name is None and is_xlsx or not is_xlsx:
exists, overwrite = ask_file_overwrite(saved_path)
if exists and not overwrite:
existing = len(list(saved_path.parent.glob(saved_path.stem + "*")))
saved_path = saved_path.with_stem(f"{saved_path.stem}_{existing + 1}")
df = df.replace(
{
r"\[yellow\]": "",
r"\[/yellow\]": "",
r"\[green\]": "",
r"\[/green\]": "",
r"\[red\]": "",
r"\[/red\]": "",
r"\[magenta\]": "",
r"\[/magenta\]": "",
},
regex=True,
)
df = df.applymap(revert_lambda_long_number_format)
if exp_type.endswith("csv"):
df.to_csv(saved_path)
elif exp_type.endswith("json"):
df.reset_index(drop=True, inplace=True)
df.to_json(saved_path)
elif exp_type.endswith("xlsx"):
# since xlsx does not support datetimes with timezones we need to remove it
df = remove_timezone_from_dataframe(df)
if sheet_name is None: # noqa: SIM223
df.to_excel(
saved_path,
index=True,
header=True,
)
elif saved_path.exists():
with pd.ExcelWriter(
saved_path,
mode="a",
if_sheet_exists="new",
engine="openpyxl",
) as writer:
df.to_excel(
writer, sheet_name=sheet_name, index=True, header=True
)
else:
with pd.ExcelWriter(
saved_path,
engine="openpyxl",
) as writer:
df.to_excel(
writer, sheet_name=sheet_name, index=True, header=True
)
elif saved_path.suffix in [".jpg", ".pdf", ".png", ".svg"]:
if figure is None:
console.print("No plot to export.")
continue
figure.show(export_image=saved_path, margin=margin)
else:
console.print("Wrong export file specified.")
continue
console.print(f"Saved file: {saved_path}")
if figure is not None:
figure._exported = True # pylint: disable=protected-access
def get_rf() -> float:
"""Use the fiscaldata.gov API to get most recent T-Bill rate.
Returns
-------
rate : float
The current US T-Bill rate
"""
try:
base = "https://api.fiscaldata.treasury.gov/services/api/fiscal_service"
end = "/v2/accounting/od/avg_interest_rates"
filters = "?filter=security_desc:eq:Treasury Bills&sort=-record_date"
response = request(base + end + filters)
latest = response.json()["data"][0]
return round(float(latest["avg_interest_rate_amt"]) / 100, 8)
except Exception:
return 0.02
def system_clear():
"""Clear screen."""
os.system("cls||clear") # nosec # noqa: S605,S607
def excel_columns() -> List[str]:
"""Return potential columns for excel.
Returns
-------
letters : List[str]
Letters to be used as excel columns
"""
letters = ["A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K", "L", "M"]
letters += ["N", "O", "P", "Q", "R", "S", "T", "U", "V", "W", "X", "Y", "Z"]
opts = (
[f"{x}" for x in letters]
+ [f"{x}{y}" for x in letters for y in letters]
+ [f"{x}{y}{z}" for x in letters for y in letters for z in letters]
)
return opts
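# Illustrative behaviour (editor's sketch, not part of the original module):
# excel_columns() yields 26 + 26**2 + 26**3 = 18278 labels, from "A" through "ZZZ".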
def handle_error_code(requests_obj, error_code_map):
"""Handle error code of HTTP requests.
Parameters
----------
requests_obj: Object
Request object
error_code_map: Dict
Dictionary mapping of HTTP error code and output message
"""
for error_code, error_msg in error_code_map.items():
if requests_obj.status_code == error_code:
console.print(error_msg)
def prefill_form(ticket_type, menu, path, command, message):
"""Pre-fill Google Form and open it in the browser."""
form_url = "https://my.openbb.co/app/terminal/support?"
params = {
"type": ticket_type,
"menu": menu,
"path": path,
"command": command,
"message": message,
}
url_params = urllib.parse.urlencode(params)
webbrowser.open(form_url + url_params)
def get_closing_price(ticker, days):
"""Get historical close price for n days in past for market asset.
Parameters
----------
ticker : str
Ticker to get data for
days : datetime
No. of days in past
Returns
-------
data : pd.DataFrame
Historic close prices for ticker for given days
"""
tick = yf.Ticker(ticker)
df = tick.history(
start=d.today() - timedelta(days=days),
interval="1d",
)["Close"]
df = df.to_frame().reset_index()
df = df.rename(columns={0: "Close"})
df.index.name = "index"
return df
def camel_case_split(string: str) -> str:
"""Convert a camel-case string to separate words.
Parameters
----------
string : str
The string to be converted
Returns
-------
new_string: str
The formatted string
"""
words = [[string[0]]]
for c in string[1:]:
if words[-1][-1].islower() and c.isupper():
words.append(list(c))
else:
words[-1].append(c)
results = ["".join(word) for word in words]
return " ".join(results).title()
def is_valid_axes_count(
axes: List[plt.Axes],
n: int,
custom_text: Optional[str] = None,
prefix_text: Optional[str] = None,
suffix_text: Optional[str] = None,
):
"""Check if axes list length is equal to n and log text if check result is false.
Parameters
----------
axes: List[plt.Axes]
External axes (2 axes are expected in the list)
n: int
number of expected axes
custom_text: Optional[str] = None
custom text to log
prefix_text: Optional[str] = None
prefix text to add before text to log
suffix_text: Optional[str] = None
suffix text to add after text to log
"""
if len(axes) == n:
return True
print_text = (
custom_text
if custom_text
else f"Expected list of {n} axis item{'s' if n > 1 else ''}."
)
if prefix_text:
print_text = f"{prefix_text} {print_text}"
if suffix_text:
print_text = f"{suffix_text} {print_text}"
logger.error(print_text)
console.print(f"[red]{print_text}\n[/red]")
return False
def support_message(s: str) -> str:
"""Argparse type to check string is in valid format for the support command."""
return s.replace('"', "")
def check_list_values(valid_values: List[str]):
"""Get valid values to test arguments given by user.
Parameters
----------
valid_values: List[str]
List of valid values to be checked
Returns
-------
check_list_values_from_valid_values_list:
Function that ensures that the valid values go through and notifies user when value is not valid.
"""
# Define the function with default arguments
def check_list_values_from_valid_values_list(given_values: str) -> List[str]:
"""Check if argparse argument is an str format.
Ensure that value1,value2,value3 and that the values value1, value2 and value3 are valid.
Parameters
----------
given_values: str
values provided by the user
Raises
------
argparse.ArgumentTypeError
Input number not between min and max values
"""
success_values = list()
values_found = (
[val.strip() for val in given_values.split(",")]
if "," in given_values
else [given_values]
)
for value in values_found:
# check if the value is valid
if value in valid_values:
success_values.append(value)
else:
console.print(f"[red]'{value}' is not valid.[/red]")
if not success_values:
log_and_raise(
argparse.ArgumentTypeError("No correct arguments have been found")
)
return success_values
# Return function handle to checking function
return check_list_values_from_valid_values_list
def search_wikipedia(expression: str) -> None:
"""Search wikipedia for a given expression.
Parameters
----------
expression: str
Expression to search for
"""
url = f"https://en.wikipedia.org/api/rest_v1/page/summary/{expression}"
response = requests.request("GET", url, headers={}, data={})
if response.status_code == 200:
response_json = json.loads(response.text)
res = {
"title": response_json["title"],
"url": f"{response_json['content_urls']['desktop']['page']}",
"summary": response_json["extract"],
}
else:
res = {
"title": "[red]Not Found[/red]",
}
df = pd.json_normalize(res)
print_rich_table(
df,
headers=list(df.columns),
show_index=False,
title=f"Wikipedia results for {expression}",
)
def screenshot() -> None:
"""Screenshot the terminal window or the plot window.
Parameters
----------
terminal_window_target: bool
Target the terminal window
"""
try:
if plt.get_fignums():
img_buf = io.BytesIO()
plt.savefig(img_buf, format="png")
shot = Image.open(img_buf)
screenshot_to_canvas(shot, plot_exists=True)
else:
console.print("No plots found.\n")
except Exception as err:
console.print(f"Cannot reach window - {err}\n")
def screenshot_to_canvas(shot, plot_exists: bool = False):
"""Frame image to OpenBB canvas.
Parameters
----------
shot
Image to frame with OpenBB Canvas
plot_exists: bool
Variable to say whether the image is a plot or screenshot of terminal
"""
WHITE_LINE_WIDTH = 3
OUTSIDE_CANVAS_WIDTH = shot.width + 4 * WHITE_LINE_WIDTH + 5
OUTSIDE_CANVAS_HEIGHT = shot.height + 4 * WHITE_LINE_WIDTH + 5
UPPER_SPACE = 40
BACKGROUND_WIDTH_SLACK = 150
BACKGROUND_HEIGHT_SLACK = 150
background = Image.open(
Path(os.path.abspath(__file__), "../../images/background.png")
)
logo = Image.open(
Path(os.path.abspath(__file__), "../../images/openbb_horizontal_logo.png")
)
try:
if plot_exists:
HEADER_HEIGHT = 0
RADIUS = 8
background = background.resize(
(
shot.width + BACKGROUND_WIDTH_SLACK,
shot.height + BACKGROUND_HEIGHT_SLACK,
)
)
x = int((background.width - OUTSIDE_CANVAS_WIDTH) / 2)
y = UPPER_SPACE
white_shape = (
(x, y),
(x + OUTSIDE_CANVAS_WIDTH, y + OUTSIDE_CANVAS_HEIGHT),
)
img = ImageDraw.Draw(background)
img.rounded_rectangle(
white_shape,
fill="black",
outline="white",
width=WHITE_LINE_WIDTH,
radius=RADIUS,
)
background.paste(shot, (x + WHITE_LINE_WIDTH + 5, y + WHITE_LINE_WIDTH + 5))
# Logo
background.paste(
logo,
(
int((background.width - logo.width) / 2),
UPPER_SPACE
+ OUTSIDE_CANVAS_HEIGHT
+ HEADER_HEIGHT
+ int(
(
background.height
- UPPER_SPACE
- OUTSIDE_CANVAS_HEIGHT
- HEADER_HEIGHT
- logo.height
)
/ 2
),
),
logo,
)
background.show(title="screenshot")
except Exception:
console.print("Shot failed.")
@lru_cache
def load_json(path: Path) -> Dict[str, str]:
"""Load a dictionary from a json file path.
    Parameters
----------
path : Path
The path for the json file
Returns
-------
Dict[str, str]
The dictionary loaded from json
"""
try:
with open(path) as file:
return json.load(file)
except Exception as e:
console.print(
f"[red]Failed to load preferred source from file: "
f"{get_current_user().preferences.USER_DATA_SOURCES_FILE}[/red]"
)
console.print(f"[red]{e}[/red]")
return {}
def list_from_str(value: str) -> List[str]:
"""Convert a string to a list.
    Parameters
    ----------
value : str
The string to convert
Returns
-------
new_value: List[str]
The list of strings
"""
if value:
return value.split(",")
return []
def str_date_to_timestamp(date: str) -> int:
"""Transform string date to timestamp
Parameters
----------
    date : str
        Date string, format YYYY-MM-DD
Returns
-------
date_ts : int
Initial date timestamp (e.g., 1_614_556_800)
"""
date_ts = int(
datetime.strptime(date + " 00:00:00+0000", "%Y-%m-%d %H:%M:%S%z").timestamp()
)
return date_ts
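# Illustrative usage (editor's sketch, not part of the original module):
#   str_date_to_timestamp("2021-03-01") -> 1614556800  (midnight UTC)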
def check_start_less_than_end(start_date: str, end_date: str) -> bool:
"""Check if start_date is equal to end_date.
Parameters
----------
start_date : str
Initial date, format YYYY-MM-DD
end_date : str
Final date, format YYYY-MM-DD
Returns
-------
bool
        True if the dates are invalid (equal, or start after end), False otherwise
"""
if start_date is None or end_date is None:
return False
if start_date == end_date:
console.print("[red]Start date and end date cannot be the same.[/red]")
return True
if start_date > end_date:
console.print("[red]Start date cannot be greater than end date.[/red]")
return True
return False
# Write an abstract helper to make requests from a url with potential headers and params
def request(
url: str, method: str = "get", timeout: int = 0, **kwargs
) -> requests.Response:
"""Abstract helper to make requests from a url with potential headers and params.
Parameters
----------
url : str
Url to make the request to
method : str
HTTP method to use. Choose from:
delete, get, head, patch, post, put, by default "get"
timeout : int
How many seconds to wait for the server to send data
Returns
-------
requests.Response
Request response object
Raises
------
ValueError
If invalid method is passed
"""
method = method.lower()
if method not in ["delete", "get", "head", "patch", "post", "put"]:
raise ValueError(f"Invalid method: {method}")
current_user = get_current_user()
# We want to add a user agent to the request, so check if there are any headers
# If there are headers, check if there is a user agent, if not add one.
# Some requests seem to work only with a specific user agent, so we want to be able to override it.
headers = kwargs.pop("headers", {})
timeout = timeout or current_user.preferences.REQUEST_TIMEOUT
if "User-Agent" not in headers:
headers["User-Agent"] = get_user_agent()
func = getattr(requests, method)
return func(
url,
headers=headers,
timeout=timeout,
**kwargs,
)
def remove_timezone_from_dataframe(df: pd.DataFrame) -> pd.DataFrame:
"""
Remove timezone information from a dataframe.
Parameters
----------
df : pd.DataFrame
The dataframe to remove timezone information from
Returns
-------
pd.DataFrame
The dataframe with timezone information removed
"""
date_cols = []
index_is_date = False
# Find columns and index containing date data
if (
df.index.dtype.kind == "M"
and hasattr(df.index.dtype, "tz")
and df.index.dtype.tz is not None
):
index_is_date = True
for col, dtype in df.dtypes.items():
if dtype.kind == "M" and hasattr(df.index.dtype, "tz") and dtype.tz is not None:
date_cols.append(col)
# Remove the timezone information
for col in date_cols:
df[col] = df[col].dt.date
if index_is_date:
index_name = df.index.name
df.index = df.index.date
df.index.name = index_name
return df
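# Illustrative usage (editor's sketch, not part of the original module):
#   df = pd.DataFrame(
#       {"close": [1.0, 2.0]},
#       index=pd.date_range("2023-01-02", periods=2, tz="US/Eastern", name="date"),
#   )
#   remove_timezone_from_dataframe(df)  # index becomes naive datetime.date values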
@check_api_key(["API_OPENAI_KEY"])
def query_LLM_local(query_text, gpt_model):
current_user = get_current_user()
os.environ["OPENAI_API_KEY"] = current_user.credentials.API_OPENAI_KEY
# check if index exists
index_path = GPT_INDEX_DIRECTORY / f"index_{GPT_INDEX_VER}.json"
old_index_paths = [
str(x) for x in GPT_INDEX_DIRECTORY.glob("index_*.json") if x != index_path
]
# define LLM
llm_predictor = LLMPredictor(llm=ChatOpenAI(temperature=0.5, model_name=gpt_model))
# define prompt helper
prompt_helper = PromptHelper(context_window=4096, num_output=256)
service_context = ServiceContext.from_defaults(
llm_predictor=llm_predictor, prompt_helper=prompt_helper
)
if os.path.exists(index_path):
# rebuild storage context
storage_context = StorageContext.from_defaults(persist_dir=index_path)
index = load_index_from_storage(
service_context=service_context, storage_context=storage_context
)
else:
# If the index file doesn't exist or is of incorrect version, generate a new one
# First, remove old version(s), if any
for path in old_index_paths:
if os.path.isfile(path):
os.remove(path)
elif os.path.isdir(path):
shutil.rmtree(path)
# Then, generate and save new index
# import from print console and say generating index, this might take a while
console.print("Generating index, this might take a while....\n")
# read in documents
documents = SimpleDirectoryReader(GPT_INDEX_DIRECTORY / "data/").load_data()
index = VectorStoreIndex.from_documents(
documents, service_context=service_context
)
# save to disk
console.print("Saving index to disk....\n")
index.storage_context.persist(index_path)
current_date = datetime.now().astimezone(pytz.timezone("America/New_York"))
prompt_string = f"""From the cli argparse help text above, provide the terminal
command for {query_text}. If relevant, use the examples as guidance.
Provide the exact command along with the parent command with a "/" separation to get that information,
and nothing else including any explanation. Don't add any other word such as 'Command to get', 'Answer'
or the likes. If you do not know, reply "I don't know"
Current date: {current_date.strftime("%Y-%m-%d")}
Current day of the week: {current_date.strftime("%A")}
Remember:
1. It is very important to provide the full path of the command including the parent command and loading
the particular target before running any subsequent commands
2. If you are asked about dates or times, load the target dates, times span during the "load" command
before running any subsequent commands. replace all <> with the actual dates and times. The date format should
be YYYY-MM-DD. If there is no date included in the query, do not specify any.
3. Country names should be snake case and lower case. example: united_states.
4. Always use a comma to separate between countries and no spaces: example: united_states,italy,spain
5. Always use "load" command first before running any subsequent commands. example:
stocks/load <symbol>/ ....
crypto/load <symbol>/ .... etc.
6. Do not include --export unless the request asks for the data to be exported or saved to a specific file type.
7. Do not make up any subcommands or options for the specific command.
8. Do not provide anything that could be interpreted as investment advice.
9. Any request asking for options refers to stocks/options.
Only do what is asked and only provide a single command string, never more than one.
"""
# try to get the response from the index
try:
query_engine = index.as_query_engine()
response = query_engine.query(prompt_string)
return response.response, response.source_nodes
except Exception as e:
# check if the error has the following "The model: `gpt-4` does not exist"
if "The model: `gpt-4` does not exist" in str(e):
console.print(
"[red]You do not have access to GPT4 model with your API key."
" Please try again with valid API Access.[/red]"
)
            return None, None
console.print(f"[red]Something went wrong with the query. {e}[/red]")
return None, None
def query_LLM_remote(query_text: str):
"""Query askobb on gpt-3.5 turbo hosted model
Parameters
----------
query_text : str
Query string for askobb
"""
url = "https://api.openbb.co/askobb"
data = {"prompt": query_text, "accessToken": get_current_user().profile.token}
ask_obbrequest_data = request(url, method="POST", json=data, timeout=15).json()
if "error" in ask_obbrequest_data:
console.print(f"[red]{ask_obbrequest_data['error']}[/red]")
return None, None
return ask_obbrequest_data["response"], ask_obbrequest_data["source_nodes"]
def check_valid_date(date_string) -> bool:
""" "Helper to see if we can parse the string to a date"""
try:
# Try to parse the string with strptime()
datetime.strptime(
date_string, "%Y-%m-%d"
) # Use the format your dates are expected to be in
return True # If it can be parsed, then it is a valid date string
except ValueError: # strptime() throws a ValueError if the string can't be parsed
return False
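# Quick checks for the date helper (illustrative only):
#
#   check_valid_date("2023-11-05")   # True  -> parses as %Y-%m-%d
#   check_valid_date("05/11/2023")   # False -> wrong format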
| [] |
2024-01-10 | bflaven/BlogArticlesExamples | google_trends_sitemap~chainlit~002_chainlit_langchain_python.py | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
[env]
# Conda Environment
# NO CONDA ENV
conda create --name chainlit_python python=3.9.13
conda info --envs
source activate chainlit_python
conda deactivate
# if needed to remove
conda env remove -n [NAME_OF_THE_CONDA_ENVIRONMENT]
# examples
conda env remove -n po_launcher_e2e_cypress
conda env remove -n parse_website
# update conda
conda update -n base -c defaults conda
# to export requirements
pip freeze > requirements_chainlit_python.txt
# to install
pip install -r requirements_chainlit_python.txt
[path]
cd /Users/brunoflaven/Documents/01_work/blog_articles/google_trends_sitemap/chainlit
[file]
chainlit run 002_chainlit_langchain_python.py -w
The -w flag tells Chainlit to enable auto-reloading, so you don’t need to restart the server every time you make changes to your application. Your chatbot UI should now be accessible at http://localhost:8000.
# other module
# go to the env
# for chainlit
pip install chainlit
Source: https://docs.chainlit.io/pure-python
"""
import os
from langchain import PromptTemplate, OpenAI, LLMChain
import chainlit as cl
# add your own api key for open ai
os.environ["OPENAI_API_KEY"] = "YOUR_OPEN_AI_API_KEY"
template = """Question: {question}
Answer: Let's think step by step."""
@cl.langchain_factory
def factory():
prompt = PromptTemplate(template=template, input_variables=["question"])
llm_chain = LLMChain(prompt=prompt, llm=OpenAI(
temperature=0), verbose=True)
return llm_chain
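# A quick way to exercise the chain outside the Chainlit UI (sketch; assumes the
# OpenAI key set above is valid and the question string is a made-up placeholder):
#
#   chain = factory()
#   print(chain.run(question="Why is the sky blue?"))
#
# When served with `chainlit run 002_chainlit_langchain_python.py -w`, Chainlit
# runs the returned chain on each user message for you.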
| [
"question",
"Question: {question}\n\nAnswer: Let's think step by step."
] |
2024-01-10 | digitalnomd/LLM-VM | src~llm_vm~completion~test_optimize.py | from dotenv import load_dotenv
import os
import openai
import sys
from optimize import *
haskell = '''
def call_gpt(state, cur_prompt: str, stop: str, max_tokens = 20, quality = "best", temperature = 0.0):
if state.verbose > 1:
print_op("\nGPT input for {" +stop + "} "+ str(len(cur_prompt)) + ".")
if state.verbose > 2:
print_op(prepPrintPromptContext(cur_prompt))
ask_tokens = max_tokens + len(cur_prompt) / 2.7
if state.verbose > 0:
print_op("ASK_TOKENS:", ask_tokens)
if (ask_tokens) > 2049:
quality = 'best'
model = { 'best' : ("text-davinci-003", 0.02),
'okay' : ("text-curie-001", 0.002),
}[quality]
def calcCost(p):
return (len(p) / 2700.0) * model[1]
cost = calcCost(cur_prompt)
try:
ans = openai.Completion.create(
model=model[0],
max_tokens=max_tokens,
stop=stop,
prompt=cur_prompt,
temperature=temperature
)
except Exception as e:
print_op("WTF:", e)
state.price += cost
return "OpenAI is down!"
response_text = ans['choices'][0]['text']
simpleprice = model[1] * ans['usage']['total_tokens'] / 1000
if state.verbose > 0:
print_op("SimplePrice: $"+str(simpleprice))
state.price += simpleprice
if state.verbose > 2:
print_op("GPT output:")
print_op(prepPrintPromptContext(response_text))
print_op("GPT output fin.\n")
return response_text
def delete_file(file_name):
location = os.getcwd()
path = os.path.join(location, file_name)
os.remove(path)
return True
def call_ChatGPT(cur_prompt, stop = None, max_tokens = 20, temperature = 0.2, gpt4 = False):
ans = openai.ChatCompletion.create(
model="gpt-3.5-turbo-0301" if not gpt4 else 'gpt-4',
max_tokens=max_tokens,
stop=stop,
messages=cur_prompt,
temperature=temperature)
return ans['choices'][0]['message']['content']
return response_text
def call_gpt(cur_prompt: str, stop: str, max_tokens = 20, quality = "best", temperature = 0.0, model = "text-davinci-003"):
ans = openai.Completion.create(
model=model,
max_tokens=max_tokens,
stop=stop,
prompt=cur_prompt,
temperature=temperature
)
return ans['choices'][0]['text']
import gzip
import json
def create_jsonl_file(data_list: list, file_name: str, compress: bool = True) -> None:
"""
Method saves list of dicts into jsonl file.
:param data: (list) list of dicts to be stored,
:param filename: (str) path to the output file. If suffix .jsonl is not given then methods appends
.jsonl suffix into the file.
:param compress: (bool) should file be compressed into a gzip archive?
"""
sjsonl = '.jsonl'
sgz = '.gz'
# Check filename
if not file_name.endswith(sjsonl):
file_name = file_name + sjsonl
# Save data
if compress:
file_name = file_name + sgz
with gzip.open(file_name, 'w') as compressed:
for ddict in data_list:
jout = json.dumps(ddict) + '\n'
jout = jout.encode('utf-8')
compressed.write(jout)
else:
with open(file_name, 'w') as out:
for ddict in data_list:
jout = json.dumps(ddict) + '\n'
out.write(jout)
return file_name, open(file_name, "rb")
'''
if __name__ == "__main__":
load_dotenv()
openai.api_key = os.getenv('OPENAI_KEY')
anarchy_key = os.getenv('ANARCHY_KEY')
print("key:", openai.api_key)
optimizer = LocalOptimizer(MIN_TRAIN_EXS=2)
#optimizer = HostedOptimizer(openai_key = openai.api_key,
# anarchy_key = anarchy_key,
# MIN_TRAIN_EXS=2)
i = 0
for h in haskell.splitlines():
print("At: ", i)
try:
print(optimizer.complete("Please convert this line to some haskell:", h + "\nHaskell:", max_tokens = 100, temperature = 0.0))
except Exception as e:
print('E:', e)
time.sleep(2)
if i > 3 and i < 20:
time.sleep(120)
i += 1
| [] |
2024-01-10 | digitalnomd/LLM-VM | src~llm_vm~completion~optimize.py | import openai
import traceback
import threading
import time
import os
import json
import tempfile
import abc
import requests
import hashlib
def generate_hash(input_string):
sha256_hash = hashlib.sha256()
sha256_hash.update(str(input_string).encode('utf-8'))
return int(sha256_hash.hexdigest(), 16) % 10**18
def asyncStart(foo):
t = [None, None]
def new_thread():
t[0] = foo()
t[1] = threading.Thread(target=new_thread)
t[1].start()
return t
def asyncAwait(t):
t[1].join()
return t[0]
class local_ephemeral:
def __init__(self):
self.training_store = {}
def get_data(self, c_id):
self.init_if_null(c_id)
return self.training_store[c_id]["data"]
def add_example(self, c_id, example):
self.init_if_null(c_id)
self.training_store[c_id]["data"] += [example]
def set_training_in_progress(self, c_id, is_training):
self.init_if_null(c_id)
self.training_store[c_id]["is_training"] = is_training
def get_training_in_progress_set_true(self, c_id):
self.init_if_null(c_id)
# TODO: this could cause a concurrency bug when distributed!
self.training_store[c_id]['lock'].acquire()
old_val = self.training_store[c_id]["is_training"]
if not old_val:
self.training_store[c_id]["is_training"] = True
self.training_store[c_id]['lock'].release()
return old_val
def set_model(self, c_id, model_id):
self.init_if_null(c_id)
self.training_store[c_id]["model"] = model_id
def get_model(self, c_id):
self.init_if_null(c_id)
return self.training_store[c_id]["model"]
def init_if_null(self, c_id):
if not c_id in self.training_store:
self.training_store[c_id] = { "is_training": False,
'lock' : threading.Lock(),
"data": [],
"model": None }
def CALL_BIG(prompt, gpt4=False, **kwargs):
cur_prompt = [{'role': "system", 'content' : prompt}]
print("CUR_PROMPT:", cur_prompt, flush=True)
print("KWARGS:", kwargs, flush=True)
ans = openai.ChatCompletion.create(
messages=cur_prompt,
model="gpt-3.5-turbo-0301" if not gpt4 else 'gpt-4',
**kwargs)
return ans['choices'][0]['message']['content']
def CALL_SMALL(*args, **kwargs):
ans = openai.Completion.create(*args, **kwargs)
return ans['choices'][0]['text']
class Optimizer:
@abc.abstractmethod
def complete(self, stable_context, dynamic_prompt, **kwargs):
"""
Runs a completion using the string stable_context+dynamic_prompt. Returns an optional training closure to use if the
caller decides that the completion was particularly good.
This method first checks if a model exists for the stable_context. If it does, it uses the model to complete the prompt.
It then checks if the number of training examples is less than the maximum allowable. If it is, or if a model wasn't
previously found, it retrieves the best completion for the prompt using a larger model, adds a new datapoint for training,
and potentially fine-tunes a new model using the updated data, storing the new model if successful.
The function returns the best completion (either generated by the stored model or the larger model).
        This cannot handle cases where either stable_context or dynamic_prompt is just whitespace!
Parameters:
- stable_context (str): Stable contextual data to use as a basis for training.
- dynamic_prompt (str): The dynamic data to generate a completion for and potentially add to training data.
- **kwargs: Additional arguments to be passed to the `call_small` and `call_big` methods.
Returns:
- completion (str): The best completion for the dynamic prompt, as generated by either the stored model or the larger model.
"""
class HostedOptimizer(Optimizer):
def __init__(self, anarchy_key, openai_key, MIN_TRAIN_EXS=20, MAX_TRAIN_EXS = 2000):
self.anarchy_key = anarchy_key
self.openai_key = openai_key
self.MIN_TRAIN_EXS = MIN_TRAIN_EXS
self.MAX_TRAIN_EXS = MAX_TRAIN_EXS
def complete(self, stable_context, dynamic_prompt, **kwargs):
"""
TODO: Runs the optimizing completion process on anarchy's hosted server with persistence.
Parameters:
- stable_context (str): Stable contextual data to use as a basis for training.
- dynamic_prompt (str): The dynamic data to generate a completion for and potentially add to training data.
- **kwargs: Additional arguments to be passed to the `call_small` and `call_big` methods.
Returns:
- completion (str): The best completion for the dynamic prompt, as generated by either the stored model or the larger model.
"""
url = "https://api.chat.dev/completion/optimizing"
payload = {**kwargs,
'stable_context': stable_context,
'dynamic_prompt': dynamic_prompt,
'anarchy_key' : self.anarchy_key,
'openai_key' : self.openai_key,
'MIN_TRAIN_EXS' : self.MIN_TRAIN_EXS,
'MAX_TRAIN_EXS' : self.MAX_TRAIN_EXS
}
headers = {'Authorization': f'Bearer {self.anarchy_key}'}
print("Payload: ", payload)
try:
response = requests.post(url, json=payload, headers=headers)
response.raise_for_status() # Raise an exception for 4XX and 5XX status codes
return response.json()['completion']
except requests.exceptions.RequestException as e:
print("Error occurred:", e)
class LocalOptimizer(Optimizer):
def __init__(self, storage=local_ephemeral(), MIN_TRAIN_EXS = 20, MAX_TRAIN_EXS = 2000, call_small = CALL_SMALL, call_big = CALL_BIG):
self.storage = storage
self.MIN_TRAIN_EXS = MIN_TRAIN_EXS
self.MAX_TRAIN_EXS = MAX_TRAIN_EXS
self.call_small = call_small
self.call_big = call_big
def complete(self, stable_context, dynamic_prompt, **kwargs):
completion, train = self.complete_delay_train(stable_context, dynamic_prompt, **kwargs)
train()
return completion
def complete_delay_train(self, stable_context, dynamic_prompt, c_id = None, **kwargs):
"""
Runs a completion using the string stable_context+dynamic_prompt. Returns an optional training closure to use if the
caller decides that the completion was particularly good.
This method first checks if a model exists for the stable_context. If it does, it uses the model to complete the prompt.
It then checks if the number of training examples is less than the maximum allowable. If it is, or if a model wasn't
previously found, it retrieves the best completion for the prompt using a larger model, adds a new datapoint for training,
and potentially fine-tunes a new model using the updated data, storing the new model if successful.
The function returns the best completion (either generated by the stored model or the larger model), and a closure
function that encapsulates the fine-tuning process for potential execution at a later time.
Parameters:
- stable_context (str): Stable contextual data to use as a basis for training.
- dynamic_prompt (str): The dynamic data to generate a completion for and potentially add to training data.
- c_id (str): To be used if multiple users could share the same stable_contexts so that we don't leak data. If its None, defaults to all possible context.
- **kwargs: Additional arguments to be passed to the `call_small` and `call_big` methods.
Returns:
- completion (str): The best completion for the dynamic prompt, as generated by either the stored model or the larger model.
- succeed_train (function): A closure function that encapsulates the fine-tuning process, ready for
execution at a later time. If you pass it a completion, it will use that, otherwise it will use the completion from the "best" model.
"""
assert dynamic_prompt.strip() != ""
assert stable_context.strip() != ""
prompt = (stable_context + dynamic_prompt).strip()
c_id = str({'stable_context' : stable_context,
'args' : kwargs,
'MIN_TRAIN_EXS' : self.MIN_TRAIN_EXS,
'MAX_TRAIN_EXS' : self.MAX_TRAIN_EXS,
'call_small' : str(self.call_small).split(' ')[1], # HACKS
'call_big' : str(self.call_big).split(' ')[1],
}) if c_id is None else c_id
c_id = generate_hash(c_id)
completion = None
model = self.storage.get_model(c_id)
if model is not None:
print("Using the new model:", model, flush=True)
completion = self.call_small(prompt = dynamic_prompt.strip(), model=model, **kwargs)
training_exs = self.storage.get_data(c_id)
best_completion_promise = None
succeed_train = None
if completion is None or len(training_exs) < self.MAX_TRAIN_EXS:
def promiseCompletion():
best_completion = self.call_big(prompt, **kwargs)
def actual_train(use_completion = None):
train_completion = best_completion if use_completion is None else use_completion
new_datapoint = (dynamic_prompt.strip(), train_completion)
self.storage.add_example(c_id, new_datapoint)
training_exs = self.storage.get_data(c_id)
print("Considering Fine-tuning", flush=True)
if len(training_exs) >= self.MIN_TRAIN_EXS and not self.storage.get_training_in_progress_set_true(c_id):
print("Actually Fine-tuning", flush=True)
def train_with():
old_model = self.storage.get_model(c_id)
training_file = create_jsonl_file(self.storage.get_data(c_id))
upload_response = openai.File.create(file=training_file, purpose="fine-tune")
training_file.close()
fine_tuning_job = openai.FineTune.create(training_file= upload_response.id)
print(f"Fine-tuning job created: {fine_tuning_job}", flush=True)
job_id = fine_tuning_job["id"]
while True:
fine_tuning_status = openai.FineTune.retrieve(id=job_id)
status = fine_tuning_status["status"]
print(f"Fine-tuning job status: {status}")
if status in ["succeeded", "completed", "failed"]:
break
time.sleep(30)
new_model_id = fine_tuning_status.fine_tuned_model
print("New_model_id: ", new_model_id, flush=True)
self.storage.set_model(c_id, new_model_id)
self.storage.set_training_in_progress(c_id, False)
if old_model is not None:
openai.Model.delete(old_model)
asyncStart(train_with)
return (best_completion, actual_train)
best_completion_promise = asyncStart(promiseCompletion)
if completion is None:
# crazy story: succeed_train gets set before this anyway if it makes sense to set it!
completion, succeed_train = asyncAwait(best_completion_promise)
def succeed_train_closure(use_completion = None):
def promise():
if succeed_train is not None:
return succeed_train(use_completion)
if best_completion_promise is not None:
try:
                    # await the promised (completion, train_closure) pair;
                    # `best_completion` itself is not in scope here
                    return asyncAwait(best_completion_promise)[1](use_completion)
except:
return
return asyncStart(promise)
return completion, succeed_train_closure
def create_jsonl_file(data_list):
out = tempfile.TemporaryFile('w+')
for a,b in data_list:
out.write(json.dumps({'prompt': a, 'completion': b}) + "\n")
out.seek(0)
return out
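# Sketch of the delayed-training flow described in complete_delay_train's docstring
# (same setup assumptions as above; `looks_good` is a hypothetical quality check
# supplied by the caller, not part of this module):
#
#   completion, train = optimizer.complete_delay_train(
#       "Answer questions about Python.\n", "Q: What does zip() do?\nA:")
#   if looks_good(completion):
#       train()   # only then record the example and possibly kick off fine-tuning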
| [
"PLACEHOLDERPLACEHOLDER"
] |
2024-01-10 | rwightman/pytorch-pommerman-rl | replay_storage.py | import torch
import numpy as np
import math
import random
from collections import deque
from helpers.segment_tree import SumSegmentTree, MinSegmentTree
class ReplayStorage:
def __init__(
self, max_steps, num_processes, gamma, prio_alpha,
obs_shape, action_space, recurrent_hidden_state_size,
device):
self.max_steps = int(max_steps)
self.num_processes = num_processes
self.gamma = gamma
self.device = device
# stored episode data
self.obs = torch.zeros(self.max_steps, *obs_shape)
self.recurrent_hidden_states = torch.zeros(self.max_steps, recurrent_hidden_state_size)
self.returns = torch.zeros(self.max_steps, 1)
if action_space.__class__.__name__ == 'Discrete':
self.actions = torch.zeros(self.max_steps, 1).long()
else:
self.actions = torch.zeros(self.max_steps, action_space.shape[0])
self.masks = torch.ones(self.max_steps, 1)
self.next_idx = 0
self.num_steps = 0
# store (full) episode stats
self.episode_step_count = 0
self.episode_rewards = deque()
self.episode_steps = deque()
# currently running (accumulating) episodes
self.running_episodes = [[] for _ in range(num_processes)]
if prio_alpha > 0:
"""
Sampling priority is enabled if prio_alpha > 0
Priority algorithm ripped from OpenAI Baselines
https://github.com/openai/baselines/blob/master/baselines/deepq/replay_buffer.py
"""
self.prio_alpha = prio_alpha
tree_capacity = 1 << math.ceil(math.log2(self.max_steps))
self.prio_sum_tree = SumSegmentTree(tree_capacity)
self.prio_min_tree = MinSegmentTree(tree_capacity)
self.prio_max = 1.0
else:
self.prio_alpha = 0
def _process_rewards(self, trajectory):
has_positive = False
reward_sum = 0.
r = 0.
for t in trajectory[::-1]:
reward = t['reward']
reward_sum += reward
if reward > (0. + 1e-5):
has_positive = True
r = reward + self.gamma*r
t['return'] = r
return has_positive, reward_sum
def _add_trajectory(self, trajectory):
has_positive, reward_sum = self._process_rewards(trajectory)
if not has_positive:
return
trajectory_len = len(trajectory)
prev_idx = self.next_idx
for transition in trajectory:
self.obs[self.next_idx].copy_(transition['obs'])
self.recurrent_hidden_states[self.next_idx].copy_(transition['rhs'])
self.actions[self.next_idx].copy_(transition['action'])
self.returns[self.next_idx].copy_(transition['return'])
self.masks[self.next_idx] = 1.0
prev_idx = self.next_idx
if self.prio_alpha:
self.prio_sum_tree[self.next_idx] = self.prio_max ** self.prio_alpha
self.prio_min_tree[self.next_idx] = self.prio_max ** self.prio_alpha
self.next_idx = (self.next_idx + 1) % self.max_steps
self.num_steps = min(self.max_steps, self.num_steps + 1)
self.masks[prev_idx] = 0.0
# update stats of stored full trajectories (episodes)
while self.episode_step_count + trajectory_len > self.max_steps:
steps_popped = self.episode_steps.popleft()
self.episode_rewards.popleft()
self.episode_step_count -= steps_popped
self.episode_step_count += trajectory_len
self.episode_steps.append(trajectory_len)
self.episode_rewards.append(reward_sum)
def _sample_proportional(self, sample_size):
res = []
for _ in range(sample_size):
mass = random.random() * self.prio_sum_tree.sum(0, self.num_steps - 1)
idx = self.prio_sum_tree.find_prefixsum_idx(mass)
res.append(idx)
return res
def insert(self, obs, rhs, actions, rewards, dones):
for n in range(self.num_processes):
self.running_episodes[n].append(dict(
obs=obs[n].clone(),
rhs=rhs[n].clone(),
action=actions[n].clone(),
reward=rewards[n].clone()
))
for n, done in enumerate(dones):
if done:
self._add_trajectory(self.running_episodes[n])
self.running_episodes[n] = []
def update_priorities(self, indices, priorities):
if not self.prio_alpha:
return
"""Update priorities of sampled transitions.
sets priority of transition at index indices[i] in buffer
to priorities[i].
Parameters
----------
indices: [int]
List of indices of sampled transitions
priorities: [float]
List of updated priorities corresponding to
transitions at the sampled indices.
"""
assert len(indices) == len(priorities)
for idx, priority in zip(indices, priorities):
priority = max(priority, 1e-6)
assert priority > 0
assert 0 <= idx < self.num_steps
self.prio_sum_tree[idx] = priority ** self.prio_alpha
self.prio_min_tree[idx] = priority ** self.prio_alpha
self.prio_max = max(self.prio_max, priority)
def feed_forward_generator(self, batch_size, num_batches=None, beta=0.):
"""Generate batches of sampled experiences.
Parameters
----------
batch_size: int
Size of each sampled batch
num_batches: int
Number of batches to sample
beta: float
To what degree to use importance weights
(0 - no corrections, 1 - full correction)
"""
batch_count = 0
        # sample the whole buffer when num_batches is None (None * batch_size would raise a TypeError)
        sample_size = num_batches * batch_size if num_batches else self.num_steps
if self.prio_alpha > 0:
indices = self._sample_proportional(sample_size)
if beta > 0:
# compute importance sampling weights to correct for the
# bias introduced by sampling in a non-uniform manner
weights = []
p_min = self.prio_min_tree.min() / self.prio_sum_tree.sum()
max_weight = (p_min * self.num_steps) ** (-beta)
for i in indices:
p_sample = self.prio_sum_tree[i] / self.prio_sum_tree.sum()
weight = (p_sample * self.num_steps) ** (-beta)
weights.append(weight / max_weight)
weights = torch.tensor(weights, dtype=torch.float32).unsqueeze(1)
else:
weights = torch.ones((len(indices), 1), dtype=torch.float32)
else:
if sample_size * 3 < self.num_steps:
indices = random.sample(range(self.num_steps), sample_size)
else:
indices = np.random.permutation(self.num_steps)[:sample_size]
weights = None
for si in range(0, len(indices), batch_size):
indices_batch = indices[si:min(len(indices), si + batch_size)]
if len(indices_batch) < batch_size:
return
weights_batch = None if weights is None else \
weights[si:min(len(indices), si + batch_size)].to(self.device)
obs_batch = self.obs[indices_batch].to(self.device)
recurrent_hidden_states_batch = self.recurrent_hidden_states[indices_batch].to(self.device)
actions_batch = self.actions[indices_batch].to(self.device)
returns_batch = self.returns[indices_batch].to(self.device)
masks_batch = self.masks[indices_batch].to(self.device)
yield obs_batch, recurrent_hidden_states_batch, actions_batch, returns_batch, \
masks_batch, weights_batch, indices_batch
batch_count += 1
if num_batches and batch_count >= num_batches:
return
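# Typical interaction with ReplayStorage during a training loop (sketch only;
# `storage`, the tensors, and `new_priorities` are assumptions, not part of this file):
#
#   storage.insert(obs, rhs, actions, rewards, dones)        # every env step
#   for batch in storage.feed_forward_generator(batch_size=64, num_batches=4, beta=0.4):
#       obs_b, rhs_b, act_b, ret_b, masks_b, weights_b, idx_b = batch
#       # ...compute new TD errors for the sampled transitions...
#       storage.update_priorities(idx_b, new_priorities)     # only used when prio_alpha > 0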
| [] |
2024-01-10 | nilsjennissen/google-vertex-ai | pages~vertex_agent.py | '''
This is the file for the google_vertex_agent
'''
#%% --------------------------------------------- IMPORTS ----------------------------------------------------------#
import streamlit as st
from credentials import OPENAI_API_KEY, project_id
from main import rec_streamlit, speak_answer, get_transcript_whisper, get_transcript_google
import os
import google.generativeai as palm
import requests
import time
import urllib.parse
from streamlit_chat import message
from streamlit_extras.colored_header import colored_header
import vertexai
from vertexai.preview.language_models import ChatModel
#%% ----------------------------------------- GOOGLE VERTEX AI ------------------------------------------------------#
# Initialise the vertexai environment
vertexai.init(project=project_id, location="us-central1")
# Initialise the chat model
model = ChatModel.from_pretrained("chat-bison@001")
chat = model.start_chat(examples=[])
#%% --------------------------------------------- INTERFACE --------------------------------------------------------#
# -------------------- SETTINGS -------------------- #
st.set_page_config(page_title="Home", layout="wide")
st.markdown("""<style>.reportview-container .main .block-container {max-width: 95%;}</style>""", unsafe_allow_html=True)
# --------------------- HOME PAGE -------------------- #
st.title("GOOGLE VERTEX AI")
st.write("""Chat with Google Vertex AI's PALM2 Bison Model""")
st.write("Let's start interacting with Vertex AI")
# ----------------- SIDE BAR SETTINGS ---------------- #
st.sidebar.subheader("Settings:")
tts_enabled = st.sidebar.checkbox("Enable Text-to-Speech", value=False)
ner_enabled = st.sidebar.checkbox("Enable NER in Response", value=False)
# ------------------ FILE UPLOADER ------------------- #
st.sidebar.subheader("File Uploader:")
uploaded_files = st.sidebar.file_uploader("Choose files", type=["csv", "html", "css", "py", "pdf", "ipynb"],
accept_multiple_files=True)
st.sidebar.metric("Number of files uploaded", len(uploaded_files))
st.sidebar.color_picker("Pick a color for the answer space", "#C14531")
# Initialize docsearch as None
docsearch = None
# --------------------- USER INPUT --------------------- #
user_input = st.text_area("")
# If record button is pressed, rec_streamlit records and the output is saved
audio_bytes = rec_streamlit()
# ------------------- TRANSCRIPTION -------------------- #
if audio_bytes or user_input:
if audio_bytes:
try:
with open("audio.wav", "wb") as file:
file.write(audio_bytes)
except Exception as e:
st.write("Error recording audio:", e)
transcript = get_transcript_google("audio.wav")
else:
transcript = user_input
st.write("**Recognized:**")
st.write(transcript)
if any(word in transcript for word in ["abort recording"]):
st.write("... Script stopped by user")
exit()
# ----------------------- ANSWER ----------------------- #
with st.spinner("Fetching answer ..."):
time.sleep(6)
response = chat.send_message(transcript)
answer = response.text
st.write(answer)
speak_answer(answer, tts_enabled)
st.success("**Interaction finished**")
| [] |
2024-01-10 | nilsjennissen/google-vertex-ai | pages~vertex_researcher.py | '''
This Vertex AI Agent is a combination of the Google Vertex AI and the Langchain Agent for research purposes.
'''
#%% --------------------------------------------- IMPORTS ----------------------------------------------------------#
import pandas as pd
from datetime import datetime
import streamlit as st
import requests
from credentials import OPENAI_API_KEY, project_id
from langchain.embeddings import VertexAIEmbeddings
from langchain.llms import VertexAI
import vertexai
from vertexai.preview.language_models import ChatModel
from langchain.text_splitter import CharacterTextSplitter
from langchain.chains.question_answering import load_qa_chain
from langchain.vectorstores import FAISS
import tempfile
import os
from langchain.vectorstores import FAISS
from langchain.document_loaders import TextLoader
from main import rec_streamlit, speak_answer, get_transcript_whisper
import time
import spacy_streamlit
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.chains import RetrievalQA
from pptx import Presentation
from pptx.util import Inches
from serpapi import GoogleSearch
from langchain.llms import OpenAI
from serpapi import GoogleSearch
from urllib.parse import urlsplit, parse_qsl
from credentials import serp_api_key
current_directory = os.path.dirname(os.path.abspath(__file__))
#%% ---------------------------------------- VERTEXAI PRELOADS -----------------------------------------------------#
# Initialise the vertexai environment
vertexai.init(project=project_id, location="us-central1")
embeddings = VertexAIEmbeddings()
# Initialise the vertexai environment
vertexai.init(project="ghc-016", location="us-central1")
# Initialise the chat model
model = ChatModel.from_pretrained("chat-bison@001")
chat = model.start_chat(examples=[])
# -------------------- SETTINGS -------------------- #
st.set_page_config(page_title="Home", layout="wide")
st.markdown("""<style>.reportview-container .main .block-container {max-width: 95%;}</style>""", unsafe_allow_html=True)
# --------------------- HOME PAGE -------------------- #
st.title("VERTEX AI Research Assistant")
st.write("""Use the power of LLMs with GOOGLE VERTEX AI and LangChain to scan through your documents. Find information
from Google Scholar and insights with lightning speed. 🚀 Create new content with the support of state-of-the-art
language models and voice-command your way through your documents. 🎙️""")
#%% --------------------------------------- PREPROCESS DOCUMENTS ----------------------------------------------------#
def organic_results(search_word, start_year, end_year, num_pages):
print("extracting organic results..")
params = {
"api_key": serp_api_key, # https://serpapi.com/manage-api-key
"engine": "google_scholar",
"q": search_word, # search query
"hl": "en", # language
"as_ylo": start_year, # from start_year
"as_yhi": end_year, # to end_year
"start": "0" # first page
}
search = GoogleSearch(params)
organic_results_data = []
page_count = 0
while page_count < num_pages:
results = search.get_dict()
print(f"Currently extracting page №{results['serpapi_pagination']['current']}..")
for result in results["organic_results"]:
position = result["position"]
title = result["title"]
publication_info_summary = result["publication_info"]["summary"]
result_id = result["result_id"]
link = result.get("link")
result_type = result.get("type")
snippet = result.get("snippet")
try:
file_title = result["resources"][0]["title"]
except: file_title = None
try:
file_link = result["resources"][0]["link"]
except: file_link = None
try:
file_format = result["resources"][0]["file_format"]
except: file_format = None
try:
cited_by_count = int(result["inline_links"]["cited_by"]["total"])
except: cited_by_count = None
cited_by_id = result.get("inline_links", {}).get("cited_by", {}).get("cites_id", {})
cited_by_link = result.get("inline_links", {}).get("cited_by", {}).get("link", {})
try:
total_versions = int(result["inline_links"]["versions"]["total"])
except: total_versions = None
all_versions_link = result.get("inline_links", {}).get("versions", {}).get("link", {})
all_versions_id = result.get("inline_links", {}).get("versions", {}).get("cluster_id", {})
organic_results_data.append({
"page_number": results["serpapi_pagination"]["current"],
"position": position + 1,
"result_type": result_type,
"title": title,
"link": link,
"result_id": result_id,
"publication_info_summary": publication_info_summary,
"snippet": snippet,
"cited_by_count": cited_by_count,
"cited_by_link": cited_by_link,
"cited_by_id": cited_by_id,
"total_versions": total_versions,
"all_versions_link": all_versions_link,
"all_versions_id": all_versions_id,
"file_format": file_format,
"file_title": file_title,
"file_link": file_link,
})
if "next" in results.get("serpapi_pagination", {}):
search.params_dict.update(dict(parse_qsl(urlsplit(results["serpapi_pagination"]["next"]).query)))
else:
break
page_count += 1
df = pd.DataFrame(organic_results_data)
# get current date and time
now = datetime.now()
# format as string
dt_string = now.strftime("%Y-%m-%d_%H-%M-%S")
# save to csv
df.to_csv(f'searches/scholar_results_{dt_string}.csv', index=False)
return df
#%% --------------------------------------- PREPROCESS DOCUMENTS ----------------------------------------------------#
def download_file(url):
# Send a HTTP request to the URL of the file, stream = True means that the file's content will be streamed when accessing the content attribute
response = requests.get(url, stream=True)
# Get the file name by splitting the URL at '/' and taking the last element
file_name = url.split('/')[-1]
    # Report which file is being downloaded
st.write(f"Downloading file {file_name}.pdf")
# Open the file and write the content into the file
with open(os.path.join('../papers/', file_name), 'wb') as file:
for chunk in response.iter_content(chunk_size=1024):
file.write(chunk)
#%% --------------------------------------- PREPROCESS DOCUMENTS ----------------------------------------------------#
# Check if 'selected_papers' and 'selected_papers_urls' are not in the session state
if 'selected_papers' not in st.session_state:
st.session_state.selected_papers = []
if 'selected_papers_urls' not in st.session_state:
st.session_state.selected_papers_urls = []
with st.form(key='search_form'):
# Settings for Google Scholar search
start_year = st.number_input("Enter start year", 2000, 2023, 2022)
end_year = st.number_input("Enter end year", 2000, 2023, 2023)
num_pages = st.number_input("Enter number of pages - caution: many pages require more calls from serpapi", 1, 10, 1)
search_words = st.text_input("Enter google scholar search words", "artificial intelligence")
search_button = st.form_submit_button(label='Search for a topic')
if search_button:
df = organic_results(search_words, start_year, end_year, num_pages)
# Save the DataFrame in the session state
st.session_state.df = df
# Check if 'df' exists in the session state
if 'df' in st.session_state:
# Use the DataFrame from the session state
st.dataframe(st.session_state.df)
with st.form(key='select_form'):
# Check if 'df' exists in the session state
if 'df' in st.session_state:
# Use the DataFrame from the session state
df = st.session_state.df
# Filter the DataFrame to only include rows with PDF links and file_format == 'PDF'
df_with_pdfs = df[df['file_link'].notnull() & (df['file_format'] == 'PDF')]
# Store the selected papers and their URLs in the session state
selected_papers = st.multiselect('Select papers', df_with_pdfs['title'].tolist(), key='selected_papers')
st.session_state.selected_papers_urls = [df_with_pdfs[df_with_pdfs['title'] == paper]['file_link'].values[0] for
paper in selected_papers]
select_button = st.form_submit_button(label='Select the files')
# Update the session state with the selected papers
if select_button:
# Display each selected paper with the header in bold and the summary in normal text
for paper in selected_papers:
paper_summary = df_with_pdfs[df_with_pdfs['title'] == paper]['publication_info_summary'].values[0]
st.subheader(f'**{paper}**')
st.write(paper_summary)
download_button = st.button(label='Download selected papers')
# Check if the download button is pressed
if download_button:
# Check if 'selected_papers' and 'selected_papers_urls' exist in the session state
if 'selected_papers' in st.session_state and 'selected_papers_urls' in st.session_state:
# Download the selected papers
for paper, url in zip(st.session_state.selected_papers, st.session_state.selected_papers_urls):
download_file(url)
# ----------------- SIDE BAR SETTINGS ---------------- #
st.sidebar.subheader("Settings:")
tts_enabled = st.sidebar.checkbox("Enable Text-to-Speech", value=False)
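# Direct use of the Scholar scraper outside Streamlit (sketch; needs a valid SerpAPI
# key in credentials.py, and the function also writes the results to
# searches/scholar_results_<timestamp>.csv):
#
#   df = organic_results("graph neural networks", 2021, 2023, num_pages=1)
#   df[["title", "cited_by_count", "file_link"]].head()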
| [] |
2024-01-10 | nilsjennissen/google-vertex-ai | pages~vertex_langchain.py | '''
This is the file for the Vertex AI Agent
'''
#%% --------------------------------------------- IMPORTS ----------------------------------------------------------#
import streamlit as st
from credentials import OPENAI_API_KEY, project_id
from vertexai.preview.language_models import TextGenerationModel
import tempfile
import os
from langchain.vectorstores import FAISS
from langchain.document_loaders import TextLoader
from main import rec_streamlit, speak_answer, get_transcript_whisper
from PyPDF2 import PdfReader
from langchain.embeddings import VertexAIEmbeddings
from langchain.llms import VertexAI
from langchain import PromptTemplate, LLMChain
from langchain.chains import RetrievalQA
import os
import time
import vertexai
from vertexai.preview.language_models import ChatModel
from langchain.text_splitter import CharacterTextSplitter
from langchain.chains.question_answering import load_qa_chain
from langchain.vectorstores import FAISS
#%% ----------------------------------------- GOOGLE VERTEX AI ------------------------------------------------------#
embeddings = VertexAIEmbeddings()
# Initialise the vertexai environment
vertexai.init(project=project_id, location="us-central1")
# Initialise the chat model
model = ChatModel.from_pretrained("chat-bison@001")
chat = model.start_chat(examples=[])
#%% --------------------------------------------- INTERFACE --------------------------------------------------------#
# -------------------- SETTINGS -------------------- #
st.set_page_config(page_title="Home", layout="wide")
st.markdown("""<style>.reportview-container .main .block-container {max-width: 95%;}</style>""", unsafe_allow_html=True)
# --------------------- HOME PAGE -------------------- #
st.title("GOOGLE VERTEX AI LANGCHAIN AGENT")
st.write("""This VERTEX AI Agent reads year selected research papers and tells you everything you need to knwo. Scan
the knowledge of Google Scholar in seconds!""")
st.write("Let's start interacting with Vertex AI")
# ----------------- SIDE BAR SETTINGS ---------------- #
st.sidebar.subheader("Settings:")
tts_enabled = st.sidebar.checkbox("Enable Text-to-Speech", value=False)
ner_enabled = st.sidebar.checkbox("Enable NER in Response", value=False)
# ------------------ FILE UPLOADER ------------------- #
st.sidebar.subheader("File Uploader:")
uploaded_files = st.sidebar.file_uploader("Choose files", type=["csv", "html", "css", "py", "pdf", "ipynb", "md", "txt"],
accept_multiple_files=True)
st.sidebar.metric("Number of files uploaded", len(uploaded_files))
st.sidebar.color_picker("Pick a color for the answer space", "#C14531")
# ------------------- FILE HANDLER ------------------- #
if uploaded_files:
file_index = st.sidebar.selectbox("Select a file to display", options=[f.name for f in uploaded_files])
selected_file = uploaded_files[[f.name for f in uploaded_files].index(file_index)]
file_extension = selected_file.name.split(".")[-1]
if file_extension in ["pdf"]:
try:
# --- Temporary file save ---
with tempfile.NamedTemporaryFile(delete=False, suffix=".pdf") as temp_file:
temp_file.write(selected_file.getvalue())
temp_file_path = temp_file.name
# --- Writing PDF content ---
with st.expander("Document Expander (Press button on the right to fold or unfold)", expanded=True):
st.subheader("Uploaded Document:")
with open(temp_file_path, "rb") as f:
pdf = PdfReader(f)
for page in pdf.pages:
text = page.extract_text()
st.write(text)
except Exception as e:
st.write(f"Error reading {file_extension.upper()} file:", e)
# --------------------- USER INPUT --------------------- #
user_input = st.text_area("")
# If record button is pressed, rec_streamlit records and the output is saved
audio_bytes = rec_streamlit()
# ------------------- TRANSCRIPTION -------------------- #
if audio_bytes or user_input:
if audio_bytes:
try:
with open("audio.wav", "wb") as file:
file.write(audio_bytes)
except Exception as e:
st.write("Error recording audio:", e)
transcript = get_transcript_whisper("audio.wav")
else:
transcript = user_input
st.write("**Recognized:**")
st.write(transcript)
if any(word in transcript for word in ["abort recording"]):
st.write("... Script stopped by user")
exit()
# ----------------------- ANSWER ----------------------- #
with st.spinner("Fetching answer ..."):
time.sleep(6)
llm = VertexAI()
# Text Splitter
text_splitter = CharacterTextSplitter(chunk_size=100, chunk_overlap=50)
chunks = text_splitter.split_text(text)
knowledge_base = FAISS.from_texts(chunks, embeddings)
# Use the PDF agent to answer the question
docs = knowledge_base.similarity_search(transcript)
# Show the amount of chunks found
st.write(f"Found {len(docs)} chunks.")
chain = load_qa_chain(llm, chain_type="stuff")
answer = chain.run(input_documents=docs, question=transcript)
st.write("**AI Response:**", answer)
speak_answer(answer, tts_enabled)
st.success("**Interaction finished**")
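# The retrieval flow above, boiled down to its LangChain calls (sketch; assumes
# `pdf_text` holds the extracted PDF text and Vertex AI credentials are configured):
#
#   chunks = CharacterTextSplitter(chunk_size=100, chunk_overlap=50).split_text(pdf_text)
#   kb = FAISS.from_texts(chunks, VertexAIEmbeddings())
#   docs = kb.similarity_search("What is the paper's main result?")
#   answer = load_qa_chain(VertexAI(), chain_type="stuff").run(
#       input_documents=docs, question="What is the paper's main result?")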
| [] |
2024-01-10 | maliijaz/OpenAI-Chatbot | app1.py | ## Conversational Q&A Chatbot
import streamlit as st
from dotenv import load_dotenv
import os
from langchain.schema import HumanMessage, AIMessage, SystemMessage
from langchain.chat_models import ChatOpenAI
st.set_page_config(page_title='Conversational QnA Chatbot Demo', page_icon=':books:')
st.title("Hey! Let's chat!")
load_dotenv()
chat_llm = ChatOpenAI(temperature=0.6, openai_api_key=os.getenv('OPENAI_API_KEY'))
if 'flowmessages' not in st.session_state:
st.session_state['flowmessages'] = [
SystemMessage(content= 'You are a conversational AI assistant')
]
def get_chatmodel_response(question):
st.session_state['flowmessages'].append(HumanMessage(content = question))
answer = chat_llm(st.session_state['flowmessages'])
st.session_state['flowmessages'].append(AIMessage(content = answer.content))
return answer.content
input_question = st.text_input('Question: ', key='question')
submit = st.button('Ask')
if submit:
    # query the chat model only when the user presses "Ask", not on every Streamlit rerun
    response = get_chatmodel_response(input_question)
    st.subheader('Response')
    st.write(response) | [
"You are a conversational AI assistant"
] |
2024-01-10 | danmxli/seePickle | backend~data~generate.py | from data.mockdata import mockdata
import os
import cohere
from dotenv import load_dotenv
import time
from data.filter import filter_text, filter_documents
load_dotenv('.env')
# True to call cohere api, False to send mockdata
call_api = True
cohere_key = os.getenv('COHERE_KEY')
if cohere_key is not None and cohere_key != '':
    co = cohere.Client(cohere_key)
else:
    # define co so the `co is None` checks below fail gracefully instead of raising a NameError
    co = None
def base_plan(userPrompt, prompt_type):
base_details = {}
if call_api:
...
if prompt_type == 'prompt_quickstart':
response = prompt_quickstart(userPrompt)
raw_text = response.text # type: ignore
base_details["raw"] = raw_text
base_details["task_list"] = filter_text(raw_text)
base_details["resource_list"] = []
elif prompt_type == 'prompt_developer':
response = prompt_developer(userPrompt)
raw_text = response.text # type: ignore
raw_resources = response.documents # type: ignore
base_details["raw"] = raw_text
base_details["task_list"] = filter_text(raw_text)
base_details["resource_list"] = filter_documents(raw_resources)
elif prompt_type == 'prompt_academia':
response = prompt_academia(userPrompt)
raw_text = response.text # type: ignore
raw_resources = response.documents # type: ignore
base_details["raw"] = raw_text
base_details["task_list"] = filter_text(raw_text)
base_details["resource_list"] = filter_documents(raw_resources)
return base_details
else:
...
# receive mockdata
mock = mockdata()
time.sleep(3)
base_details["raw"] = mock["raw_text"]
base_details["task_list"] = filter_text(mock["raw_text"])
if prompt_type == 'prompt_quickstart':
base_details["resource_list"] = []
else:
base_details["resource_list"] = filter_documents(
mock["raw_resources"])
return base_details
"""
prompt_quickstart -> empty resource_list
prompt_developer  -> populated resource_list
prompt_academia   -> populated resource_list
"""
def base_chat_generate(prompt, taskDescription, conversation_history):
base_history = [
{
"role": "USER", "message": f"Here is my base task description: {taskDescription}. You are a helpful assistant whose role is to answer anything from me that is related to the task description."
},
{
"role": "CHATBOT", "message": "Yes, I understand. I am ready to help you with anything you say."
}
]
base_history.extend(conversation_history)
if call_api:
...
if co is None:
return ("key not found")
response = co.chat(
model='command-nightly',
message=prompt,
temperature=0.7,
chat_history=base_history,
)
raw_response = response.text
else:
...
mock = mockdata()
raw_response = mock["chat_response"]
time.sleep(3)
return raw_response
# TODO fine tune prompt
def prompt_quickstart(input):
...
if co is None:
return ("key not found")
response = co.chat(
model='command-nightly',
message=f"here is my description: {input}. Generate a numbered list of instructions to achieve my goal.",
temperature=0.7,
chat_history=[
{
"role": "USER", "message": "I need you to generate a numbered list of instructions to achieve my goal."
},
{
"role": "CHATBOT", "message": "Sure, provide me with your goal, and my task is to create a numbered list of instructions."
}
],
)
return response
def prompt_developer(input):
...
if co is None:
return ("key not found")
response = co.chat(
model='command-nightly',
message=f"here is my description: {input}. Generate a numbered list of instructions to achieve my goal.",
connectors=[{"id": "web-search"}],
temperature=0,
chat_history=[
{
"role": "USER", "message": "I need you to generate a numbered list of instructions to achieve my goal."
},
{
"role": "CHATBOT", "message": "Sure, provide me with your goal, and my task is to create a numbered list of instructions."
}
],
)
return response
def prompt_academia(input):
...
if co is None:
return ("key not found")
response = co.chat(
model='command-nightly',
message=f"here is my description: {input}. Generate a numbered list of instructions to achieve my goal.",
connectors=[{"id": "web-search"}],
temperature=0,
chat_history=[
{
"role": "USER", "message": "I need you to generate a numbered list of instructions to achieve my goal."
},
{
"role": "CHATBOT", "message": "Sure, provide me with your goal, and my task is to create a numbered list of instructions."
}
],
)
return response
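# Illustrative top-level call (requires COHERE_KEY in .env, or set call_api = False
# to exercise the mock-data path; the goal string is a made-up placeholder):
#
#   details = base_plan("build a personal finance tracker", "prompt_developer")
#   details["task_list"]       # numbered steps parsed out of the raw text
#   details["resource_list"]   # web-search documents (always [] for prompt_quickstart)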
| [
"here is my description: INPUT. Generate a numbered list of instructions to achieve my goal."
] |
2024-01-10 | milosivanovic/xbot | xbot~modules~scanner.py | import re
import time
import random
import json
import urllib.request
import urllib.error
import urllib.parse
import io
import PythonSed
from . import cleverbot
from . import openai
def scan(bot, message = None):
results = []
if message:
bot.remote['message'] = message
message_lowercase = bot.remote['message'].lower()
# scan for youtube links and show title
for code in re.findall('(?:youtube\.com\/watch\?|youtu\.be/)(?:[A-Za-z0-9-_\.&%#=]*v=)?([A-Za-z0-9-_]+)', bot.remote['message']):
results.append(youtube_title(code))
# someone is talking to the bot
if re.search('^%s(?:\:|,)' % re.escape(bot.nick.lower()), message_lowercase):
if bot.remote['nick'].lower() in bot.inv['banned']:
return
#bot._sendq(("NOTICE", bot.remote['nick']), "This feature has been disabled.")
#if 'cleverbot' not in bot.inv: bot.inv['cleverbot'] = {}
#if bot.remote['receiver'] not in bot.inv['cleverbot']:
# bot.inv['cleverbot'][bot.remote['receiver']] = cleverbot.CleverBot()
#query = bot.remote['message'][len(bot.nick)+2:]
#results.append("%s: %s" % (bot.remote['nick'], re.compile('cleverbot', re.IGNORECASE).sub(bot.nick, bot.inv['cleverbot'][bot.remote['receiver']].query(query))))
if 'openai' not in bot.inv: bot.inv['openai'] = {}
if bot.remote['receiver'] not in bot.inv['openai']:
bot.inv['openai'][bot.remote['receiver']] = openai.OpenAIChat(bot)
query = bot.remote['message'][len(bot.nick)+2:]
results.append("%s: %s" % (bot.remote['nick'], re.compile('openai', re.IGNORECASE).sub(bot.nick, bot.inv['openai'][bot.remote['receiver']].ask(query))))
# sed replace
if bot.remote['message'].startswith("s/"):
out = io.StringIO()
message_stringio = io.StringIO(bot.previous['message'])
sed = PythonSed.Sed()
sed.regexp_extended = True
try:
sed.load_string(bot.remote['message'])
sed_result = sed.apply(message_stringio, output=out)
if len(sed_result):
pre_append = "%s meant: %s" % (bot.remote['nick'], sed_result[0])
if len(pre_append) > 429:
pre_append = "%s..." % pre_append[:426]
results.append(pre_append)
else:
if bot.remote['message'].count('/') == 2:
results.append("%s: You're a dumdum." % bot.remote['nick'])
except PythonSed.SedException as e:
results.append(str(e))
except IndexError:
pass
# per 10% chance, count uppercase and act shocked
#if len(bot.remote['message']) > 2 and random.random() > 0.9:
# if count_upper(bot.remote['message']) > 80:
# time.sleep(4)
# results.append(random.choice([':' + 'O' * random.randint(1, 10), 'O' * random.randint(1, 10) + ':']))
# per 0.01% chance, butt into someone's conversation
'''if random.random() > 0.999:
if not bot.remote['message'].startswith("\x01"):
words = bot.remote['message'].split()
if len(words) > 2:
for n in range(random.randint(1, 3)):
if random.random() > 0.5:
words[random.randint(1, len(words)-1)] = "butt"
else:
for m, word in enumerate(words):
if len(word) > 4 and m > 0:
if random.random() > 0.3:
words[m] = words[m][:-4] + "butt"
results.append(' '.join(words))'''
if 'gface' in bot.remote['message']:
results.append('\x28\x20\xE2\x89\x96\xE2\x80\xBF\xE2\x89\x96\x29')
results = [result for result in results if result is not None]
try: return '\n'.join(results)
except TypeError: return None
def youtube_title(code):
try:
# try with embed json data (fast)
url = urllib.parse.quote_plus('https://www.youtube.com/watch?v=%s' % code)
title = json.load(urllib.request.urlopen('https://www.youtube.com/oembed?url=%s' % url, timeout = 5))['title']
except json.JSONDecodeError:
# json data didn't return a title? forget about it
title = None
except urllib.error.HTTPError as error:
# embed request not allowed? fallback to HTML (slower)
if error.code == 401:
import lxml.html
try:
title = lxml.html.document_fromstring(urllib.request.urlopen('https://www.youtube.com/watch?v=%s' % code, timeout = 5).read().decode('utf-8')).xpath("//title/text()")[0].replace(' - YouTube', '')
#[0].split("\n")[1].strip()
except IndexError:
title = None
else:
title = None
if error.code != 404:
raise
if title:
if title != "YouTube - Broadcast Yourself.":
return "YouTube: \x02%s\x02" % title
return None
def count_upper(str):
n = s = 0
for c in str:
z = ord(c)
if (z >= 65 and z <= 90) or z == 33:
n += 1
if z == 32:
s += 1
return float(n) / (len(str)-s) * 100
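# Worked example: count_upper("WOW ok") counts 3 uppercase of 5 non-space characters,
# returning 60.0 -- an all-caps message easily clears the 80% threshold used by the
# (currently commented-out) shocked-reaction check above.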
| [] |
2024-01-10 | milosivanovic/xbot | xbot~modules~io.py | import datetime
import time
import urllib
import socket
from . import scanner
# user modules
from . import wolframalpha, googleapi, tell
from . import fun, man, quotes, lotto, eval
from . import js, translate, wikipedia
from . import etymology, ud, covid19_tracker
from . import giphy
from . import openai
def read(bot):
global Bot
Bot = bot
if bot.remote['nick'] and bot.remote['nick'] != bot.nick:
if bot.remote['message'].startswith("!"):
args = bot.remote['message'][1:].rstrip().split(" ")
command = args[0].lower()
alibrary = {
'reload': lambda: bot._reload(args),
'voice': lambda: voice(args),
'nick': lambda: cnick(args),
'release': lambda: release(args),
'identify': lambda: ident(),
'join': lambda: join(args),
'part': lambda: part(args),
'kick': lambda: kick(args),
'mode': lambda: mode(args),
'perms': lambda: perms(args),
'eval': lambda: reply(bot.remote['sendee'], eval.parse(bot, args)),
'raw': lambda: raw(args)
}
clibrary = {
'topic': lambda: topic(bot, args),
'help': lambda: "Available commands: %s" % ', '.join(sorted(clibrary.keys())),
'abort': lambda: abort(args),
'time': lambda: local_time(bot, args),
'say': lambda: say(bot, args),
'calc': lambda: wolframalpha.wa(bot, args),
'go': lambda: googleapi.search(bot, args),
'wiki': lambda: wikipedia.wiki(bot, args),
'tell': lambda: tell.answer(bot, args),
'twss': lambda: fun.twss(bot, args),
'cookie': lambda: fun.cookie(bot, args),
'choose': lambda: fun.choose(bot, args),
'8ball': lambda: fun.m8b(bot, args),
'quotes': lambda: quotes.get_quote(bot, args),
'js': lambda: js.execute(bot, args),
'benis': lambda: fun.benis(bot, args),
'tr': lambda: translate.translate(bot, args),
'nab': lambda: fun.nab(bot, args),
'frites': lambda: fun.frites(bot, args),
'etym': lambda: etymology.etym(bot, args),
'mrpass': lambda: fun.monsieurp(bot, args),
'sysinfo': lambda: fun.sysinfo(bot, args),
'ud': lambda: ud.ud(bot, args),
'nzvax': lambda: covid19_tracker.nzvax(bot, args),
'gif': lambda: giphy.search(bot, args),
#'ghetto': lambda: fun.ghetto(bot, args),
#'keygen': lambda: fun.keygen(bot, args),
#'lotto': lambda: fun.lotto(bot, args),
#'sorting_hat': lambda: fun.sorting_hat(bot, args),
'prompt': lambda: openai.set_prompt(bot, args)
}
if bot.remote['nick'].lower() not in bot.inv['banned']:
if command in alibrary:
if bot.remote['host'] in [host.strip() for host in bot.config.get(bot.network, 'admin_hostnames').split(',')]:
alibrary[command]()
bot.previous['user'] = bot.remote['sendee']
else:
if bot.voice:
reply(bot.remote['sendee'], "%s: Can't do that, noob." % bot.remote['nick'])
elif bot.voice and command in clibrary:
result = execute(command, clibrary[command])
bot.previous['user'] = bot.remote['sendee']
if result:
reply(bot.remote['sendee'], result)
elif bot.remote['message'].startswith("\x01") and bot.remote['message'].endswith("\x01"):
type = bot.remote['message'][1:-1].split()[0]
args = bot.remote['message'][1:-1].split()[1:]
if type != "ACTION":
ctcp(type, args)
elif bot.remote['mid'] == "INVITE" and bot.remote['nick'].lower() not in bot.inv['banned']:
join(bot.remote['message'])
else:
if bot.init['registered'] and not bot.init['identified']:
if bot.remote['nick'] == "NickServ":
if "registered" in bot.remote['message']:
bot._login()
elif "identified" in bot.remote['message']:
bot.init['identified'] = True
time.sleep(3)
autojoin()
if bot.voice:
# start scanning messages for certain data
result = execute(None, scanner.scan, bot)
if result:
reply(bot.remote['sendee'], result)
bot.previous['message'] = bot.remote['message']
else:
if (bot.remote['mid'].startswith("4") or bot.remote['mid'].startswith("5")) and bot.remote['mid'] != "462":
reply(bot.previous.get('user') or bot.admin, "Message from %s: Error #%s: %s" % (bot.remote['server'], bot.remote['mid'], bot.remote['message']))
if not bot.init['joined'] and not bot.init['registered']:
autojoin()
def execute(context, func, *args):
try: result = func(*args)
except urllib.error.HTTPError as e: result = "%s: error: %s" % ('!'+context if context else Bot.name, e)
except urllib.error.URLError as e: result = "%s: error: %s" % ('!'+context if context else Bot.name, e)
except socket.timeout as e: result = "%s: error: timeout exceeded." % ('!'+context)
return result
def autojoin():
channels = Bot.config.get(Bot.network, 'channels').split(",")
for channel in channels:
# since join() is usually used with !join <channel>, we need the first param to be None
join([None, channel.strip()])
Bot.init['joined'] = True
def ctcp(type, args):
if type == "VERSION":
write(("NOTICE", Bot.remote['nick']), "\x01VERSION %s:%s:%s\x01" % ("woot", "1.0", "linux"))
elif type == "PING":
write(("NOTICE", Bot.remote['nick']), "\x01PING %s\x01" % args[0])
def write(args, message = None):
Bot._sendq(args, message)
def reply(nick, message):
write(("PRIVMSG", nick), message)
def local_time(bot, args):
if len(args) == 1:
now = datetime.datetime.now()
hour = int(now.strftime("%H"))
bedtime = " (bedtime)" if hour >= 0 and hour <= 7 else ''
return "It is now %s%s on %s NZT." % (now.strftime("%I:%M%p"), bedtime, now.strftime("%A, %d %B %Y"))
else:
return "Usage: !%s" % args[0]
def voice(args):
args = [arg.lower() for arg in args]
if len(args) == 2:
if args[1] == "off":
write(("PRIVMSG", Bot.remote['sendee']), "\x01ACTION stays quiet.\x01")
Bot.voice = False
elif args[1] == "on":
write(("PRIVMSG", Bot.remote['sendee']), "\x01ACTION resumes normal operation.\x01")
Bot.voice = True
def cnick(args):
if len(args) == 2:
write(("NICK", args[1]))
Bot.nick = args[1]
def release(args):
if len(args) == 1:
write(("PRIVMSG", "NickServ"), "RELEASE %s %s" % (Bot.name, Bot.config.get(Bot.network, 'password')))
write(("PRIVMSG", Bot.remote['sendee']), "Nick released.")
def ident():
Bot._ident()
Bot._login()
def join(args):
if len(args) == 2:
channel = args[1]
if channel not in Bot.inv['rooms']:
write(("JOIN", channel))
else:
write(("PRIVMSG", Bot.remote['sendee']), "I'm already in that channel, noob.")
def part(args):
if len(args) == 1:
channel = Bot.remote['sendee']
elif len(args) == 2:
channel = args[1]
if channel in Bot.inv['rooms']:
write(("PART", channel))
else:
write(("PRIVMSG", Bot.remote['sendee']), "I'm not in that channel, noob.")
def kick(args):
if len(args) >= 2:
if args[1].lower() == Bot.nick.lower():
reply(Bot.remote['sendee'], ":(")
else:
if Bot.inv['rooms'][Bot.remote['receiver']][Bot.nick]['mode'] == "o":
write(("KICK", Bot.remote['sendee'], args[1]), ' '.join(args[2:]))
else:
write(("PRIVMSG", Bot.remote['sendee']), "No ops lol.")
def topic(bot, args):
if len(args) >= 2:
topic = ' '.join(args[1:])
if Bot.remote['sendee'] == "#ualug":
if len(topic) <= 250:
write(("TOPIC", Bot.remote['sendee']), 'UALUG: %s [/%s] | UALUG website: http://ualug.ece.auckland.ac.nz/' % (topic, Bot.remote['nick']))
else:
reply(Bot.remote['sendee'], "Sorry %s that topic is too long." % Bot.remote['nick'])
else:
write(("TOPIC", Bot.remote['sendee']), ' '.join(args[1:]))
else:
reply(Bot.remote['sendee'], "Usage: !%s <topic>" % args[0])
def mode(args):
if len(args) >= 2:
write(("MODE", Bot.remote['sendee']), ' '.join(args[1:]))
def perms(args):
if len(args) == 3:
user = args[2].lower()
if args[1] == "deny":
if user not in Bot.inv['banned']:
Bot.inv['banned'].append(user)
else:
reply(Bot.remote['sendee'], "User already denied.")
elif args[1] == "allow":
if user in Bot.inv['banned']:
Bot.inv['banned'].remove(user)
else:
reply(Bot.remote['sendee'], "User wasn't denied to start with.")
def list(nick):
return write(("PRIVMSG", Bot.remote['sendee']), str(Bot.inv['rooms'][Bot.remote['sendee']]))
def say(bot, args):
if len(args) >= 2:
if len(args) >= 3:
if args[1].startswith("#") and not Bot.remote['sendee'].startswith("#"):
if Bot.inv['rooms'].get(args[1]):
if Bot.remote['nick'] in Bot.inv['rooms'][args[1]] or Bot.remote['host'] == 'pdpc/supporter/student/milos':
if args[2].startswith("/me"):
return write(("PRIVMSG", args[1]), "\x01ACTION %s\x01" % ' '.join(args[3:]))
else:
return write(("PRIVMSG", args[1]), ' '.join(args[2:]))
else:
return write(("PRIVMSG", Bot.remote['sendee']), "You're not even in that channel.")
else:
return write(("PRIVMSG", Bot.remote['sendee']), "I'm not even in that channel.")
else:
if args[1].startswith("/me"):
return write(("PRIVMSG", Bot.remote['sendee']), "\x01ACTION %s\x01" % ' '.join(args[2:]))
if not args[1].startswith("!"):
write(("PRIVMSG", Bot.remote['sendee']), ' '.join(args[1:]))
else:
write(("PRIVMSG", Bot.remote['sendee']), 'o_O')
else:
return "Usage: !say [#channel] [/me] <message>"
def raw(args):
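    # Split the raw IRC command into its argument words and an optional trailing message (text after " :")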
arguments = ' '.join(args[1:]).split(" :")
left = arguments[0].split()
try: message = arguments[1]
    except IndexError: message = None
Bot._sendq(left, message)
def abort(args):
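    # Flush the bot's outgoing send queue, dropping any queued backlog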
if len(Bot.bot.sendq) > 0:
length = len(Bot.bot.sendq)
del Bot.bot.sendq[:]
message = "Abort received. Send queue flushed (%d lines of backlog)." % length
Bot.bot._log('dbg', message)
reply(Bot.remote['sendee'], message)
else:
reply(Bot.remote['sendee'], "Send queue is already empty.")
| [] |
2024-01-10 | sshh12/llm_convo | examples~twilio_ngrok_ml_rhyme_hotline.py | from gevent import monkey
monkey.patch_all()
import logging
import argparse
import tempfile
import os
import time
from llm_convo.agents import OpenAIChat, TwilioCaller
from llm_convo.audio_input import get_whisper_model
from llm_convo.twilio_io import TwilioServer
from llm_convo.conversation import run_conversation
from pyngrok import ngrok
def main(port, remote_host, start_ngrok):
if start_ngrok:
ngrok_http = ngrok.connect(port)
remote_host = ngrok_http.public_url.split("//")[1]
static_dir = os.path.join(tempfile.gettempdir(), "twilio_static")
os.makedirs(static_dir, exist_ok=True)
logging.info(f"Starting server at {remote_host} from local:{port}, serving static content from {static_dir}")
logging.info(f"Set call webhook to https://{remote_host}/incoming-voice")
tws = TwilioServer(remote_host=remote_host, port=port, static_dir=static_dir)
tws.start()
agent_a = OpenAIChat(
system_prompt="You are a machine learning assistant. Answer the users questions about machine learning with short rhymes. Ask follow up questions when needed to help clarify their question.",
init_phrase="Hello! Welcome to the Machine Learning hotline, how can I help?",
)
def run_chat(sess):
agent_b = TwilioCaller(sess, thinking_phrase="One moment.")
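        # Wait until the Twilio media stream is connected before starting the conversation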
while not agent_b.session.media_stream_connected():
time.sleep(0.1)
run_conversation(agent_a, agent_b)
tws.on_session = run_chat
if __name__ == "__main__":
logging.getLogger().setLevel(logging.INFO)
parser = argparse.ArgumentParser()
parser.add_argument("--preload_whisper", action="store_true")
parser.add_argument("--start_ngrok", action="store_true")
parser.add_argument("--port", type=int, default=8080)
parser.add_argument("--remote_host", type=str, default="localhost")
args = parser.parse_args()
if args.preload_whisper:
get_whisper_model()
main(args.port, args.remote_host, args.start_ngrok)
| [] |
2024-01-10 | sshh12/llm_convo | examples~keyboard_chat_with_gpt.py | from gevent import monkey
monkey.patch_all()
import logging
import argparse
import tempfile
import os
from llm_convo.agents import OpenAIChat, TerminalInPrintOut
from llm_convo.conversation import run_conversation
def main(model):
agent_a = OpenAIChat(
system_prompt="You are a machine learning assistant. Answer the users questions about machine learning with short rhymes. Ask follow up questions when needed to help clarify their question.",
init_phrase="Hello! Welcome to the Machine Learning hotline, how can I help?",
model=model,
)
agent_b = TerminalInPrintOut()
run_conversation(agent_a, agent_b)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--model", type=str, default="gpt-3.5-turbo")
args = parser.parse_args()
main(args.model)
| [] |
2024-01-10 | sshh12/llm_convo | examples~twilio_ngrok_pizza_order.py | from gevent import monkey
monkey.patch_all()
import logging
import argparse
import tempfile
import os
import time
import sys
from llm_convo.agents import OpenAIChat, TwilioCaller
from llm_convo.audio_input import get_whisper_model
from llm_convo.twilio_io import TwilioServer
from llm_convo.conversation import run_conversation
from pyngrok import ngrok
def main(port, remote_host, start_ngrok, phone_number):
if start_ngrok:
ngrok_http = ngrok.connect(port)
remote_host = ngrok_http.public_url.split("//")[1]
static_dir = os.path.join(tempfile.gettempdir(), "twilio_static")
os.makedirs(static_dir, exist_ok=True)
logging.info(
f"Starting server at {remote_host} from local:{port}, serving static content from {static_dir}, will call {phone_number}"
)
logging.info(f"Set call webhook to https://{remote_host}/incoming-voice")
input(" >>> Press enter to start the call after ensuring the webhook is set. <<< ")
tws = TwilioServer(remote_host=remote_host, port=port, static_dir=static_dir)
tws.start()
agent_a = OpenAIChat(
system_prompt="""
        You are an ordering bot that is going to call a pizza place and order a pizza.
When you need to say numbers space them out (e.g. 1 2 3) and do not respond with abbreviations.
If they ask for information not known, make something up that's reasonable.
The customer's details are:
* Address: 1234 Candyland Road, Apt 506
* Credit Card: 1234 5555 8888 9999 (CVV: 010)
* Name: Bob Joe
* Order: 1 large pizza with only pepperoni
""",
init_phrase="Hi, I would like to order a pizza.",
)
def run_chat(sess):
agent_b = TwilioCaller(sess, thinking_phrase="One moment.")
while not agent_b.session.media_stream_connected():
time.sleep(0.1)
run_conversation(agent_a, agent_b)
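        # Exit the process once the call's conversation has finished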
sys.exit(0)
tws.on_session = run_chat
tws.start_call(phone_number)
if __name__ == "__main__":
logging.getLogger().setLevel(logging.INFO)
parser = argparse.ArgumentParser()
parser.add_argument("--phone_number", type=str)
parser.add_argument("--preload_whisper", action="store_true")
parser.add_argument("--start_ngrok", action="store_true")
parser.add_argument("--port", type=int, default=8080)
parser.add_argument("--remote_host", type=str, default="localhost")
args = parser.parse_args()
if args.preload_whisper:
get_whisper_model()
main(args.port, args.remote_host, args.start_ngrok, args.phone_number)
| [] |
2024-01-10 | siva-nagendra/usdchat | chat_bot.py | import logging
import time
import traceback
import openai
from config.config import Config
from usdchat.error_handlers.openai_error_handler import handle_openai_error
openai.api_key = Config.OPENAI_API_KEY
logging.basicConfig(
level=logging.ERROR,
format="%(asctime)s - %(name)s - %(levelname)s - %(message)s")
logger = logging.getLogger(__name__)
class Chat:
def __init__(self, model, config=None):
self.model = model
self.config = config
self.max_tokens = self.config.MAX_TOKENS
self.temperature = self.config.TEMPERATURE
def stream_chat(self, messages, delay_time=0.1):
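        # Accept either a plain prompt string or a pre-built list of chat messages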
if isinstance(messages, str):
messages = [{"role": "user", "content": messages}]
try:
response = openai.ChatCompletion.create(
model=self.model,
temperature=self.temperature,
max_tokens=self.max_tokens,
messages=messages,
stream=True,
)
reply_content = ""
for event in response:
event_text = event["choices"][0]["delta"]
new_text = event_text.get("content", "")
reply_content += new_text
yield new_text
time.sleep(delay_time)
return reply_content
except openai.error.APIError as e:
error_message = handle_openai_error(401)
logger.error(f"OpenAI API Error: {error_message}")
except openai.error.APIConnectionError as e:
logger.error("Failed to connect to OpenAI API.")
except openai.error.RateLimitError as e:
logger.error("OpenAI API request exceeded rate limit.")
except openai.error.Timeout as e:
logger.error("Request to OpenAI API timed out.")
except Exception as e:
logger.error(f"An exception occurred: {e}")
traceback.print_exc()
| [] |
2024-01-10 | BDT2023/DreamGenie | Scene_Analyzer~gpt_call.py | import openai
from icecream import ic # for debugging https://github.com/gruns/icecream
import os
import random as rand
import re
import pandas as pd
openai.api_key = os.environ.get('API_KEY_OPENAI')
TEST = True
def load_dreams(file_name="..\\Scene_Analyzer\\sample_texts_normalized.csv"):
"""
Load dreams from a CSV file.
Args:
file_name (str, optional): The path to the CSV file.
Defaults to "..\\Scene_Analyzer\\sample_texts_normalized.csv".
Returns:
pandas.DataFrame: A dataframe containing the loaded dreams.
"""
dir_path = os.path.dirname(os.path.realpath(__file__))
file_name = os.path.join(dir_path,"sample_texts_normalized.csv")
dream_list = pd.read_csv(file_name, header=None)
return dream_list
def get_samples(file_name="manual_scene_separation_data.txt"):
"""
Open the file with the manually separated scenes and return a list of the separated dreams.
file_name: Name of the file containing the separated scenes. Default is 'manual_scene_separation_data.txt'.
Returns:
List of strings containing the separated dreams.
"""
samples = []
dir_path = os.path.dirname(os.path.realpath(__file__))
file_name = os.path.join(dir_path,file_name)
try:
with open(file_name, "r") as f:
data = f.read()
samples = data.split("###")[1:-1]
samples = [s.replace("IN:", "").strip() for s in samples]
except FileNotFoundError:
print(f"File '{file_name}' not found.")
return samples
def build_prompt(
dream, command="Give short visual descriptions of the scenes in the following:", n=3
):
"""
Build the prompt for the API call.
Start by giving n examples and their separation, then pass the command and the dream to be separated.
Args:
dream (str): The dream to be separated.
command (str, optional): The command to be passed to the model.
n (int, optional) = number of examples of manual separation to pass to the model
"""
examples = ""
samples = get_samples()
# build the examples string from the manual separation data
for i in range(0, min(len(samples), n)):
examples += samples[i]
examples += os.linesep
# If we are passing examples in the prompt, we need to add "Examples:" to the prompt, otherwise we don't.
if examples != "":
prompt = f"Examples of dreams and scene seperation:\
{examples.strip()}\
{os.linesep}\
{command}\
{os.linesep}\
{dream}"
else:
prompt = f"{command}{os.linesep}{dream}"
return prompt
def load_latest_output():
"""
Load the latest output from "out.txt" file.
Reads the text from "out.txt" file and splits it into a list of strings based on the "OUT:" keyword.
It then further splits the list based on the "Scene" keyword, removes the line numbers and returns the list.
Returns:
List of strings containing the latest output from the "out.txt" file.
"""
with open("out.txt", "r") as f:
text = f.read()
split_text = text.split("OUT:") # split the text based on the "OUT:" keyword
gen_list = split_text[-1].split("Scene")[1:] # remove the first element
gen_list = [re.sub(r"[0-9]\: ", "", x) for x in gen_list] # remove the line numbers
    gen_list[-1] = gen_list[-1].split('\n')[0]+'\n' # keep only the first line of the last scene
return gen_list
def call_openai(
dream,
command="Give short visual descriptions of the scenes in the following:",
test=False,
):
"""
A function to call the OpenAI API and return a list of scenes resulting from the separation.
dream = the dream to be analyzed
command = the command to be passed to the model
test = if True, the function will return a temporary text instead of calling the API
"""
# temporary text to not spend tokens on the API
if test == True:
return load_latest_output()
# model_engine = "text-curie-001"
# model_engine = "davinci-002"
model_engine = "text-davinci-003" # the best one so far for this task
# API call to OpenAI GPT-3 using this schema:
# https://beta.openai.com/docs/api-reference/completions/create
# generated_text = "\n\n1. The first scene is of a person on an escalator, with plastic squares and water rolling along the side. The person later learns that they are filters. \n\n2. The second scene is of a large church where a mardi gras parade is taking place inside while mass is being celebrated in peace. \n\n3. The third scene is of a clerk coming to collect a bill which has already been paid. He has with him graded papers from a school, but the person does not see their son's name."
# generated_text ='''1. Two dogs running across a sandy desert, with a person running away from them in the distance.
# 2. A child standing in a lush, green forest, looking around curiously.
# 3. An elderly woman sitting at a table in a brightly lit shopping mall,enjoying a cone of ice cream.'''
# generated_text = """
# Scene 1: This is scene 1.
# Scene 2: This is scene 2.
# 3. This is line 3.
# """
prompt = build_prompt(dream.lstrip(), command, n=3)
completions = openai.Completion.create(
engine=model_engine,
prompt=prompt,
max_tokens=256,
n=1,
stop=None, # optional token that stops the generation
temperature=0.45, # not too high
)
# # # Print the generated text
generated_text = completions.choices[0].text
# Append the generated text to the output file to keep track of the results.
with open("out.txt", "a+") as f:
f.write(f"Prompt: {prompt}")
ic(f"Prompt: {prompt}")
f.write(f"Output: {generated_text}")
ic(f"Output: {generated_text}")
f.write(os.linesep)
f.write(f"########################")
ic(f"########################")
f.write(os.linesep)
def split_generated(generated_text):
"""
Split the generated text into multiple scenes based on the occurrence of the word "Scene".
Args:
generated_text: The text to be split.
Returns:
A list of split scenes.
"""
split_text = generated_text.split("Scene")[1:] # remove the first element because it's empty
if len(split_text) != 0:
return split_text
pattern = r"^\d+\."
# Split the text using the pattern
split_text = re.split(pattern, generated_text, flags=re.MULTILINE)
if len(split_text) == 0:
split_text = generated_text.split('\n')
return split_text[1:]
gen_list = split_generated(generated_text)
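    # Strip the "N: " / "N. " scene numbering from each passage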
gen_list = [re.sub(r"[0-9](\:|\.) ", "", x) for x in gen_list]
ic(gen_list)
return gen_list
def separate():
"""
return a random dream from the csv
"""
# load the dreams from the csv
dream_list = load_dreams(file_name="..\\Scene_Analyzer\\dream-export.csv")
# show a random dream
rand.seed(os.urandom(32))
return dream_list[0][rand.randint(0, len(dream_list) - 1)]
def separate_random(test = False,command="Give short visual descriptions of the scenes in the following:"):
"""
load a random dream from the csv and return the call to openai scene separator on it.
"""
text = separate()
ic(text)
return call_openai(text, test=test,command=command)
if __name__ == "__main__":
# Load a random dream from the csv and call the openai scene separator on it.
separate_random(test=TEST) | [] |
2024-01-10 | uclanlp/biases-llm-reference-letters | agency_classifier~agency_generation_util.py | import re
import random
import torch
import openai
from ratelimiter import RateLimiter
from retrying import retry
AGENCY_DATASET_GEN_PROMPTS = {
'You will rephrase a biography two times to demonstrate agentic and communal language traits respectively. "agentic" is defined as more achievement-oriented, and "communal" is defined as more social or service-oriented. The paragraph is: "{}"'
}
# # Uncomment this part and fill in your OpenAI organization and API key to query ChatGPT's API
# openai.organization = $YOUR_ORGANIZATION$
# openai.api_key = $YOUR_API_KEY$
# To avoid exceeding rate limit for ChatGPT API
@retry(stop_max_attempt_number=10)
@RateLimiter(max_calls=20, period=60)
def generate_response_fn(utt):
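    # Prepend the agentic/communal rewriting instruction to the biography before sending it to ChatGPT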
prompt = random.sample(AGENCY_DATASET_GEN_PROMPTS, 1)[0] # .format(utt)
utt = " ".join([prompt, utt])
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo", messages=[{"role": "user", "content": utt}]
)
# print('ChatGPT: {}'.format(response["choices"][0]["message"]["content"].strip()))
return response["choices"][0]["message"]["content"].strip()
| [
"{'You will rephrase a biography two times to demonstrate agentic and communal language traits respectively. \"agentic\" is defined as more achievement-oriented, and \"communal\" is defined as more social or service-oriented. The paragraph is: \"{}\"'}"
] |
2024-01-10 | iamirrf/aibudget | AI-Powered%20Budget%20App.py | import openai
import pandas as pd
import os
# Initialize OpenAI API
openai.api_key = "YOUAPIKEY"
FILE_NAME = "users_data.xlsx"
current_user = None
# Utility functions
def save_to_excel(data):
    df = pd.DataFrame.from_dict(data, orient="index")
df.to_excel(FILE_NAME)
def read_from_excel():
if os.path.exists(FILE_NAME):
return pd.read_excel(FILE_NAME, index_col=0).to_dict(orient='index')
return {}
users = read_from_excel()
def register():
global current_user
username = input("Enter a username: ")
if username in users:
print("Username already exists.")
return
password = input("Enter a password: ")
users[username] = {"password": password, "income": 0, "expenses": {}}
current_user = username
save_to_excel(users)
print("Registration successful!")
def login():
global current_user
username = input("Enter your username: ")
password = input("Enter your password: ")
if users.get(username) and users[username]["password"] == password:
current_user = username
print("Logged in successfully!")
else:
print("Invalid credentials!")
def add_income():
income = float(input("Enter your monthly income: "))
users[current_user]["income"] = income
save_to_excel(users)
def add_expense():
category = input("Enter expense category (e.g. Rent, Food, Entertainment): ")
amount = float(input(f"Enter amount for {category}: "))
users[current_user]["expenses"][category] = amount
save_to_excel(users)
def view_budget():
print(f"\nIncome: ${users[current_user]['income']}")
print("Expenses:")
for category, amount in users[current_user]['expenses'].items():
print(f"{category}: ${amount}")
total_expenses = sum(users[current_user]['expenses'].values())
print(f"Total Expenses: ${total_expenses}")
print(f"Remaining Budget: ${users[current_user]['income'] - total_expenses}\n")
def get_ai_suggestions():
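    # Summarize the user's income and expenses into a prompt and ask the model for personalized advice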
total_expenses = sum(users[current_user]['expenses'].values())
remaining_budget = users[current_user]['income'] - total_expenses
prompt = f"I have a monthly income of ${users[current_user]['income']} and expenses totaling ${total_expenses}. Here's a breakdown of my expenses: {users[current_user]['expenses']}. How can I achieve financial freedom?"
response = openai.Completion.create(engine="davinci", prompt=prompt, max_tokens=150)
print(response.choices[0].text.strip())
def menu():
while True:
print("\nAI-powered Budgeting App")
print("1. Register")
print("2. Login")
print("3. Add Income")
print("4. Add Expense")
print("5. View Budget Overview")
print("6. Get AI-Powered Financial Suggestions")
print("7. Exit")
choice = input("Enter your choice: ")
if choice == "1":
register()
elif choice == "2":
login()
elif choice == "3":
if current_user:
add_income()
else:
print("Please login first!")
elif choice == "4":
if current_user:
add_expense()
else:
print("Please login first!")
elif choice == "5":
if current_user:
view_budget()
else:
print("Please login first!")
elif choice == "6":
if current_user:
get_ai_suggestions()
else:
print("Please login first!")
elif choice == "7":
break
else:
print("Invalid choice!")
if __name__ == "__main__":
menu()
| [
"f\"I have a monthly income of ${users[current_user]['income']} and expenses totaling ${total_expenses}. Here's a breakdown of my expenses: {users[current_user]['expenses']}. How can I achieve financial freedom?",
"income",
"expenses"
] |
2024-01-10 | RCGAI/SimplyRetrieve | chat~prepare.py | # Copyright (c) Kioxia Corporation and its affiliates.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import os
import glob
import pandas as pd
import csv
import json
import importlib
import numpy as np
from typing import List
from tqdm import tqdm
from langchain.schema import Document
from langchain.document_loaders import PyMuPDFLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter, TokenTextSplitter
from langchain.embeddings import HuggingFaceEmbeddings
import faiss
import gradio as gr
parser = argparse.ArgumentParser()
parser.add_argument('--config', type=str, default=None)
parser.add_argument('--input', type=str, default='data/')
parser.add_argument('--output', type=str, default='knowledge/')
parser.add_argument('--split_encoder', type=str, default='gpt2')
parser.add_argument('--split_chunk_size', type=int, default=100)
parser.add_argument('--split_chunk_overlap', type=int, default=0)
parser.add_argument('--out_docs', type=str, default='local_knowledgebase')
parser.add_argument('--out_docsext', type=str, default='.tsv')
parser.add_argument('--do_embed', type=bool, default=True)
parser.add_argument('--embed_encoder', type=str, default='intfloat/multilingual-e5-base')
parser.add_argument('--out_embed', type=str, default='local_embed')
parser.add_argument('--out_embedext', type=str, default='.npy')
parser.add_argument('--index_method', type=str, default='hnsw')
parser.add_argument('--index_hnsw_m', type=int, default=64)
parser.add_argument('--out_index', type=str, default='local_index')
parser.add_argument('--out_indexext', type=str, default='.index')
parser.add_argument('--index_ivfpq_nlist', type=int, default=256)
parser.add_argument('--index_ivfpq_nsegment', type=int, default=16)
parser.add_argument('--index_ivfpq_nbit', type=int, default=8)
args, unknown = parser.parse_known_args()
os.makedirs(args.output, exist_ok=True)
def initialize_config(config):
with open(config, "r", encoding="utf-8") as reader:
text = reader.read()
kwargs = json.loads(text)
return kwargs
def initialize_loaders(kwargs):
for k, v in kwargs.items():
if k == 'loader_config':
for kk, vv in v.items():
if kk == 'ext_types':
for kkk, vvv in vv.items():
if isinstance(vvv, str) and vvv.startswith('langchain.'):
loader_module = importlib.import_module('.'.join(vvv.split('.')[:-1]))
loader = getattr(loader_module, vvv.split('.')[-1])
kwargs[k][kk][kkk] = loader
def docslist_load_local(directory):
ext = "*.pdf"
docslist = glob.glob(os.path.join(directory, ext), recursive=True)
return docslist
def docslist_load_config(directory, loaders):
docslist = []
for ext, loader in loaders.items():
docslist.extend(glob.glob(os.path.join(directory, "*"+ext), recursive=True))
return docslist
def documents_load_local(doc_path):
docs = []
docs.extend(PyMuPDFLoader(doc_path).load())
return docs
def documents_load_config(doc_path, loaders):
docs = []
docs.extend(loaders['.'+doc_path.split('.')[-1]](doc_path).load())
return docs
def documents_load_file(doc_file, loaders):
docs = []
docs.extend(loaders['.'+doc_file.name.split('.')[-1]](doc_file.name).load())
return docs
def documents_split(docs, encoder, chunk_size, chunk_overlap):
#text_splitter = RecursiveCharacterTextSplitter(chunk_size=chunk_size, chunk_overlap=chunk_overlap)
text_splitter = TokenTextSplitter(encoding_name=encoder, chunk_size=chunk_size, chunk_overlap=chunk_overlap)
docs_split = text_splitter.split_documents(docs)
return docs_split
def documents_save(docs_split, path, filename, fileformat, index_current):
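    # Write the split passages and their sources to a TSV file, normalizing hyphenated line breaks, newlines and tabs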
global cnt_save
savepath = os.path.join(path, filename+fileformat)
if fileformat == ".tsv":
docs_page_content = [o.page_content for o in docs_split]
docs_page_content = pd.DataFrame(docs_page_content, columns=["page_content"])
docs_page_content = docs_page_content.replace(r'-\n','', regex=True)
docs_page_content = docs_page_content.replace(r'\n',' ', regex=True)
docs_page_content = docs_page_content.replace(r'\t',' ', regex=True)
docs_source = [o.metadata for o in docs_split]
docs_source = pd.DataFrame(docs_source, columns=["source"])
docs_out = pd.concat([docs_page_content, docs_source], axis=1)
docs_out.index += 1
if cnt_save == 0:
writemode = 'w'
cnt_save += 1
else:
docs_out.index += index_current
writemode = 'a'
docs_out.to_csv(savepath, index=True, header=False, sep='\t', mode=writemode)
else:
raise NotImplementedError("Not implemented at this time")
return
def embedding_create(encoder, path, filename, fileformat):
loadpath = os.path.join(path, filename+fileformat)
docs_split = pd.read_csv(loadpath, sep='\t', header=None)
print("total number of split passages for embedding:", len(docs_split))
embeddings = HuggingFaceEmbeddings(model_name=encoder, encode_kwargs={'normalize_embeddings': True})
embed_split = embeddings.embed_documents(docs_split[1])
return embed_split
def embedding_save(embed_split, path, filename, fileformat):
savepath = os.path.join(path, filename+fileformat)
np.save(savepath, embed_split)
return
def index_create(embed_split, method, hnsw_m, nlist, nsegment, nbit):
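    # Build a FAISS index over the embeddings: flat (exact search), HNSW (graph-based ANN), or IVF-PQ with an HNSW coarse quantizer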
if method == 'flat':
index_split = faiss.IndexFlat(embed_split.shape[1], faiss.METRIC_INNER_PRODUCT)
elif method == 'hnsw':
index_split = faiss.IndexHNSWFlat(embed_split.shape[1], hnsw_m, faiss.METRIC_INNER_PRODUCT)
elif method == 'indexivfpq_hnsw':
coarse_split = faiss.IndexHNSWFlat(embed_split.shape[1], hnsw_m, faiss.METRIC_INNER_PRODUCT)
index_split = faiss.IndexIVFPQ(coarse_split, embed_split.shape[1], nlist, nsegment, nbit)
index_split.train(embed_split)
else:
raise NotImplementedError("Not implemented at this time")
index_split.add(embed_split)
return index_split
def index_save(index_split, path, filename, fileformat):
savepath = os.path.join(path, filename+fileformat)
faiss.write_index(index_split, savepath)
return
def insert_knowledge(config, k_dir, k_basename, k_disp, k_desc):
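    # Update the retriever entry for this knowledgebase if it already exists; otherwise append a new entry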
config_new = config
for item in config_new["retriever_config"]["retriever"]:
if item["knowledgebase"] == os.path.join(k_dir, k_basename + ".tsv"):
item["name"] = k_disp
item["description"] = k_desc
return config_new
new_knowledge = {"name": k_disp,
"description": k_disp,
"knowledgebase": os.path.join(k_dir, k_basename + ".tsv"),
"index": os.path.join(k_dir, k_basename + ".index"),
"index_type": "hnsw"
}
config_new["retriever_config"]["retriever"].append(new_knowledge)
return config_new
def upload_knowledge(config, path_files, k_dir, k_basename, progress=gr.Progress()):
global cnt_save
if path_files == None:
if os.path.exists(os.path.join(k_dir, k_basename+args.out_docsext)):
return "Loaded existing knowledge"
else:
return "No knowledge to load"
if os.path.exists(os.path.join(k_dir, k_basename+args.out_docsext)):
cnt_save = 1
else:
cnt_save = 0
progress(0.1, desc="Preparing")
os.makedirs(k_dir, exist_ok=True)
kwargs = config
print("configs:", kwargs)
initialize_loaders(kwargs)
docslist = path_files
print("total number of readable documents:", len(docslist))
print("readable documents:", docslist)
progress(0.3, desc="Loading, Splitting and Saving Documents")
print("loading, splitting and saving documents...")
cnt_passage = 0
cnt_split = 0
for item in tqdm(docslist):
docs = documents_load_file(item, kwargs['loader_config']['ext_types'])
docs_split = documents_split(docs, args.split_encoder, args.split_chunk_size, args.split_chunk_overlap)
documents_save(docs_split, k_dir, k_basename, args.out_docsext, cnt_split)
cnt_passage += len(docs)
cnt_split += len(docs_split)
print("total number of loaded passages:", cnt_passage)
print("total number of split passages:", cnt_split)
progress(0.5, desc="Creating and Saving Embedding")
print("creating embedding")
embed_split = embedding_create(kwargs['retriever_config']['encoder'], k_dir, k_basename, args.out_docsext)
print("total number of embeddings:", len(embed_split))
print("saving embedding")
embedding_save(embed_split, k_dir, k_basename, args.out_embedext)
progress(0.8, desc="Creating and Saving Index")
print("creating index")
embed_split = np.array(embed_split)
index_split = index_create(embed_split, args.index_method, args.index_hnsw_m, args.index_ivfpq_nlist, args.index_ivfpq_nsegment, args.index_ivfpq_nbit)
print("total number of indexes:", index_split.ntotal)
print("saving index")
index_save(index_split, k_dir, k_basename, args.out_indexext)
print("documents preparation completed")
return "New knowledge loaded"
def main():
print(args)
print("loading documents list...")
if args.config == None:
docslist = docslist_load_local(args.input)
else:
print("use config file")
kwargs = initialize_config(args.config)
print("configs:", kwargs)
initialize_loaders(kwargs)
docslist = docslist_load_config(args.input, kwargs['loader_config']['ext_types'])
docslist.sort()
print("total number of readable documents:", len(docslist))
print("readable documents:", docslist)
print("loading, splitting and saving documents...")
cnt_passage = 0
cnt_split = 0
if args.config == None:
for item in tqdm(docslist):
docs = documents_load_local(item)
docs_split = documents_split(docs, args.split_encoder, args.split_chunk_size, args.split_chunk_overlap)
documents_save(docs_split, args.output, args.out_docs, args.out_docsext, cnt_split)
cnt_passage += len(docs)
cnt_split += len(docs_split)
else:
for item in tqdm(docslist):
docs = documents_load_config(item, kwargs['loader_config']['ext_types'])
docs_split = documents_split(docs, args.split_encoder, args.split_chunk_size, args.split_chunk_overlap)
documents_save(docs_split, args.output, args.out_docs, args.out_docsext, cnt_split)
cnt_passage += len(docs)
cnt_split += len(docs_split)
print("total number of loaded passages:", cnt_passage)
print("total number of split passages:", cnt_split)
if args.do_embed:
print("creating embedding")
embed_split = embedding_create(args.embed_encoder, args.output, args.out_docs, args.out_docsext)
print("total number of embeddings:", len(embed_split))
print("saving embedding")
embedding_save(embed_split, args.output, args.out_embed, args.out_embedext)
print("creating index")
embed_split = np.array(embed_split)
index_split = index_create(embed_split, args.index_method, args.index_hnsw_m, args.index_ivfpq_nlist, args.index_ivfpq_nsegment, args.index_ivfpq_nbit)
print("total number of indexes:", index_split.ntotal)
print("saving index")
index_save(index_split, args.output, args.out_index, args.out_indexext)
print("documents preparation completed")
if __name__ == "__main__":
cnt_save = 0
main()
| [] |
2024-01-10 | Ibtisam-Mohammad/Cohere-Interview-Bot | try.py | import streamlit as st
import cohere
from PyPDF2 import PdfReader
# uploaded_file = st.file_uploader('Choose your .pdf file', type="pdf")
# input_text = st.text_input(label='Give the role you are applying for',key="input")
# print('-----------------',input_text,'--------------')
# print(type(input_text))
# if input_text=='':
# print('+++++++++++',1)
# if uploaded_file is not None:
# reader = PdfReader("resume_juanjosecarin.pdf")
# number_of_pages = len(reader.pages)
# page = reader.pages
# print(page[0].extract_text())
API_KEY='qKNifTO0EkVWeXAaVzfnnDROVaDZmSbQL5ILgMmc'
co = cohere.Client(API_KEY)
response_resume = co.generate(
model='command-xlarge-nightly',
prompt='Hello:',
max_tokens=10,
temperature=0,
k=0,
p=0.75,
frequency_penalty=0,
presence_penalty=0,
stop_sequences=[],
return_likelihoods='NONE')
print('response_resume:',response_resume.generations[0].text) | [] |
2024-01-10 | jacsice/quivr | files.py | import os
from typing import (
Any,
Union,
)
import zipfile
import streamlit as st
from streamlit.runtime.uploaded_file_manager import (
UploadedFile,
UploadedFileRec,
UploadedFileManager,
)
from streamlit.runtime.scriptrunner import get_script_run_ctx
from supabase.client import Client
from langchain.vectorstores.supabase import SupabaseVectorStore
from components_keys import ComponentsKeys
from loaders.audio import process_audio
from loaders.txt import process_txt
from loaders.csv import process_csv
from loaders.markdown import process_markdown
from loaders.pdf import process_pdf
from loaders.html import (
create_html_file,
delete_tempfile,
get_html,
process_html,
)
from loaders.powerpoint import process_powerpoint
from loaders.docx import process_docx
from utils import compute_sha1_from_content
ctx = get_script_run_ctx()
manager = UploadedFileManager()
file_processors = {
".txt": process_txt,
".csv": process_csv,
".md": process_markdown,
".markdown": process_markdown,
".m4a": process_audio,
".mp3": process_audio,
".webm": process_audio,
".mp4": process_audio,
".mpga": process_audio,
".wav": process_audio,
".mpeg": process_audio,
".pdf": process_pdf,
".html": process_html,
".pptx": process_powerpoint,
".docx": process_docx
}
def file_uploader(supabase, vector_store):
# Omit zip file support if the `st.secrets.self_hosted` != "true" because
# a zip file can consist of multiple files so the limit on 1 file uploaded
# at a time in the demo can be circumvented.
accepted_file_extensions = list(file_processors.keys())
accept_multiple_files = st.secrets.self_hosted == "true"
if accept_multiple_files:
accepted_file_extensions += [".zip"]
files = st.file_uploader(
"**Upload a file**",
accept_multiple_files=accept_multiple_files,
type=accepted_file_extensions,
key=ComponentsKeys.FILE_UPLOADER,
)
if st.secrets.self_hosted == "false":
st.markdown("**In demo mode, the max file size is 1MB**")
if st.button("Add to Database"):
# Single file upload
if isinstance(files, UploadedFile):
filter_file(files, supabase, vector_store)
# Multiple files upload
elif isinstance(files, list):
for file in files:
filter_file(file, supabase, vector_store)
def file_already_exists(supabase, file):
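    # Deduplicate uploads by comparing the file's SHA-1 hash against document metadata stored in Supabase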
file_sha1 = compute_sha1_from_content(file.getvalue())
response = supabase.table("documents").select("id").eq("metadata->>file_sha1", file_sha1).execute()
return len(response.data) > 0
def file_to_uploaded_file(file: Any) -> Union[None, UploadedFile]:
"""Convert a file to a streamlit `UploadedFile` object.
This allows us to unzip files and treat them the same way
streamlit treats files uploaded through the file uploader.
Parameters
---------
file : Any
The file. Can be any file supported by this app.
Returns
-------
Union[None, UploadedFile]
The file converted to a streamlit `UploadedFile` object.
Returns `None` if the script context cannot be grabbed.
"""
if ctx is None:
print("script context not found, skipping uploading file:", file.name)
return
file_extension = os.path.splitext(file.name)[-1]
file_name = file.name
file_data = file.read()
# The file manager will automatically assign an ID so pass `None`
# Reference: https://github.com/streamlit/streamlit/blob/9a6ce804b7977bdc1f18906d1672c45f9a9b3398/lib/streamlit/runtime/uploaded_file_manager.py#LL98C6-L98C6
uploaded_file_rec = UploadedFileRec(None, file_name, file_extension, file_data)
uploaded_file_rec = manager.add_file(
ctx.session_id,
ComponentsKeys.FILE_UPLOADER,
uploaded_file_rec,
)
return UploadedFile(uploaded_file_rec)
def filter_zip_file(
file: UploadedFile,
supabase: Client,
vector_store: SupabaseVectorStore,
) -> None:
"""Unzip the zip file then filter each unzipped file.
Parameters
----------
file : UploadedFile
The uploaded file from the file uploader.
supabase : Client
The supabase client.
vector_store : SupabaseVectorStore
The vector store in the database.
"""
with zipfile.ZipFile(file, "r") as z:
unzipped_files = z.namelist()
for unzipped_file in unzipped_files:
with z.open(unzipped_file, "r") as f:
filter_file(f, supabase, vector_store)
def filter_file(file, supabase, vector_store):
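    # Normalize to an UploadedFile, recurse into zip archives, skip duplicate or empty files, then dispatch to the matching processor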
# Streamlit file uploads are of type `UploadedFile` which has the
# necessary methods and attributes for this app to work.
if not isinstance(file, UploadedFile):
file = file_to_uploaded_file(file)
file_extension = os.path.splitext(file.name)[-1]
if file_extension == ".zip":
filter_zip_file(file, supabase, vector_store)
return True
if file_already_exists(supabase, file):
st.write(f"😎 {file.name} is already in the database.")
return False
if file.size < 1:
st.write(f"💨 {file.name} is empty.")
return False
if file_extension in file_processors:
if st.secrets.self_hosted == "false":
file_processors[file_extension](vector_store, file, stats_db=supabase)
else:
file_processors[file_extension](vector_store, file, stats_db=None)
st.write(f"✅ {file.name} ")
return True
st.write(f"❌ {file.name} is not a valid file type.")
return False
def url_uploader(supabase, vector_store):
url = st.text_area("**Add an url**",placeholder="https://www.quivr.app")
button = st.button("Add the URL to the database")
if button:
if not st.session_state["overused"]:
html = get_html(url)
if html:
st.write(f"Getting content ... {url} ")
try:
file, temp_file_path = create_html_file(url, html)
except UnicodeEncodeError as e:
st.write(f"❌ Error encoding character: {e}")
file, temp_file_path = create_html_file(url, html)
ret = filter_file(file, supabase, vector_store)
delete_tempfile(temp_file_path, url, ret)
else:
st.write(f"❌ Failed to access to {url} .")
else:
st.write("You have reached your daily limit. Please come back later or self host the solution.") | [] |
2024-01-10 | jacsice/quivr | v2~txt.py | from common import process_file
from langchain.document_loaders import TextLoader
from fastapi import UploadFile
async def process_txt(vector_store, file: UploadFile, stats_db):
return await process_file(vector_store, file, TextLoader, stats_db=stats_db) | [] |
2024-01-10 | jacsice/quivr | question.py | import anthropic
import streamlit as st
from streamlit.logger import get_logger
from langchain.chains import ConversationalRetrievalChain
from langchain.memory import ConversationBufferMemory
from langchain.llms import OpenAI
from langchain.chat_models import ChatAnthropic
from langchain.vectorstores import SupabaseVectorStore
from stats import add_usage
memory = ConversationBufferMemory(
memory_key="chat_history", return_messages=True)
openai_api_key = st.secrets.openai_api_key
anthropic_api_key = st.secrets.anthropic_api_key
logger = get_logger(__name__)
def count_tokens(question, model):
count = f'Words: {len(question.split())}'
if model.startswith("claude"):
count += f' | Tokens: {anthropic.count_tokens(question)}'
return count
def chat_with_doc(model, vector_store: SupabaseVectorStore, stats_db):
if 'chat_history' not in st.session_state:
st.session_state['chat_history'] = []
question = st.text_area("## Ask a question")
columns = st.columns(3)
with columns[0]:
button = st.button("Ask")
with columns[1]:
count_button = st.button("Count Tokens", type='secondary')
with columns[2]:
clear_history = st.button("Clear History", type='secondary')
if clear_history:
st.session_state['chat_history'] = []
st.experimental_rerun()
if button:
qa = None
if not st.session_state["overused"]:
add_usage(stats_db, "chat", "prompt" + question, {"model": model, "temperature": st.session_state['temperature']})
if model.startswith("gpt"):
logger.info('Using OpenAI model %s', model)
qa = ConversationalRetrievalChain.from_llm(
OpenAI(
model_name=st.session_state['model'], openai_api_key=openai_api_key, temperature=st.session_state['temperature'], max_tokens=st.session_state['max_tokens']), vector_store.as_retriever(), memory=memory, verbose=True)
elif anthropic_api_key and model.startswith("claude"):
logger.info('Using Anthropics model %s', model)
qa = ConversationalRetrievalChain.from_llm(
ChatAnthropic(
model=st.session_state['model'], anthropic_api_key=anthropic_api_key, temperature=st.session_state['temperature'], max_tokens_to_sample=st.session_state['max_tokens']), vector_store.as_retriever(), memory=memory, verbose=True, max_tokens_limit=102400)
st.session_state['chat_history'].append(("You", question))
# Generate model's response and add it to chat history
model_response = qa({"question": question})
logger.info('Result: %s', model_response)
st.session_state['chat_history'].append(("Quivr", model_response["answer"]))
# Display chat history
st.empty()
for speaker, text in st.session_state['chat_history']:
st.markdown(f"**{speaker}:** {text}")
else:
st.error("You have used all your free credits. Please try again later or self host.")
if count_button:
st.write(count_tokens(question, model))
| [] |
2024-01-10 | jacsice/quivr | v2~common.py | from typing import Optional
from fastapi import UploadFile
from langchain.schema import Document
from langchain.text_splitter import RecursiveCharacterTextSplitter
# from stats import add_usage
import asyncio
import os
import tempfile
import time
from utils import compute_sha1_from_file, compute_sha1_from_content
async def process_file(vector_store, file: UploadFile, loader_class, stats_db: Optional = None):
documents = []
file_sha = ""
file_name = file.filename
file_size = file.file._file.tell() # Getting the size of the file
dateshort = time.strftime("%Y%m%d")
# Here, we're writing the uploaded file to a temporary file, so we can use it with your existing code.
with tempfile.NamedTemporaryFile(delete=False, suffix=file.filename) as tmp_file:
await file.seek(0)
content = await file.read()
tmp_file.write(content)
tmp_file.flush()
loader = loader_class(tmp_file.name)
documents = loader.load()
file_sha1 = compute_sha1_from_file(tmp_file.name) # Ensure this function works with FastAPI
os.remove(tmp_file.name)
chunk_size = 500
chunk_overlap = 0
text_splitter = RecursiveCharacterTextSplitter.from_tiktoken_encoder(chunk_size=chunk_size, chunk_overlap=chunk_overlap)
print(documents)
documents = text_splitter.split_documents(documents)
# Add the document sha1 as metadata to each document
docs_with_metadata = [Document(page_content=doc.page_content, metadata={"file_sha1": file_sha1, "file_size":file_size , "file_name": file_name, "chunk_size": chunk_size, "chunk_overlap": chunk_overlap, "date": dateshort}) for doc in documents]
vector_store.add_documents(docs_with_metadata)
# if stats_db:
# add_usage(stats_db, "embedding", "file", metadata={"file_name": file_name,"file_type": file.filename, "chunk_size": chunk_size, "chunk_overlap": chunk_overlap})
return
async def file_already_exists(supabase, file):
file_content = await file.read()
file_sha1 = compute_sha1_from_content(file_content)
response = supabase.table("documents").select("id").eq("metadata->>file_sha1", file_sha1).execute()
return len(response.data) > 0
| [] |
2024-01-10 | magn3144/mavis1 | searchclient~sound_to_text.py | import openai
import os
def convert_audio_to_text(audio_file):
transcript = openai.Audio.transcribe("whisper-1", audio_file)
return transcript.text | [] |
2024-01-10 | GermODread/Skynet-cli | Skynet.py | #!/usr/bin/python3
import openai
import os
import sys
### Add the following line to .bashrc or .zshrc or any profile
# export OPENAI_API_KEY='API key'
api = 'OPENAI_API_KEY' # Not recommended to keep it here. Save using OpenAI builtin asset management function.
cmd = sys.argv[1]
args = sys.argv[2:]
question = " ".join(args)
def helperdocumentation():
print("Available functions:")
print(skynetstream.__doc__)
print(editgpt.__doc__)
print(imgcreate.__doc__)
def skynetstream(q, z):
"""Chat completion with GPT-4
Not using stream version"""
openai.api_key = os.getenv(z)
response = openai.ChatCompletion.create(
model="gpt-4",
messages=[
{"role": "system", "content": "Welcome to the new World"},
{"role": "user", "content": f"{q}"},
{"role": "assistant", "content": ""}
],
)
print(response['choices'][0]['message']['content'])
def imgcreate(q, z):
"""Image creation.
Use "img" followed by text description of image, to create an image using Dall-E".
Uses dall-e language model to create images based on text.
"""
openai.api_key = os.getenv(z)
response = openai.Image.create(prompt=f"{q}", n=1, size="1024x1024")
print(response.data)
def editgpt(q, z):
"""Text proofing.
Use "edit" followed by text to proof any spelling mistakes.
Uses text-davinci-edit-001 to correct any spelling mistakes.
"""
openai.api_key = os.getenv(z)
response = openai.Edit.create(
model="text-davinci-edit-001",
input=f"{q}",
instruction="Fix the spelling mistakes",
)
print(response.choices[0].text)
if __name__ == "__main__":
if cmd == "stream":
skynetstream(question, api)
if cmd == "img":
imgcreate(question, api)
if cmd == "edit":
editgpt(question, api)
if cmd == "help":
helperdocumentation()
| [
"Welcome to the new World"
] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~vectorstores~supabase.py | from __future__ import annotations
import uuid
from itertools import repeat
from typing import (
TYPE_CHECKING,
Any,
Dict,
Iterable,
List,
Optional,
Tuple,
Type,
Union,
)
import numpy as np
from langchain.docstore.document import Document
from langchain.schema.embeddings import Embeddings
from langchain.schema.vectorstore import VectorStore
from langchain.vectorstores.utils import maximal_marginal_relevance
if TYPE_CHECKING:
import supabase
class SupabaseVectorStore(VectorStore):
"""`Supabase Postgres` vector store.
It assumes you have the `pgvector`
extension installed and a `match_documents` (or similar) function. For more details:
https://integrations.langchain.com/vectorstores?integration_name=SupabaseVectorStore
You can implement your own `match_documents` function in order to limit the search
space to a subset of documents based on your own authorization or business logic.
Note that the Supabase Python client does not yet support async operations.
If you'd like to use `max_marginal_relevance_search`, please review the instructions
below on modifying the `match_documents` function to return matched embeddings.
Examples:
.. code-block:: python
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.schema import Document
from langchain.vectorstores import SupabaseVectorStore
from supabase.client import create_client
docs = [
Document(page_content="foo", metadata={"id": 1}),
]
embeddings = OpenAIEmbeddings()
supabase_client = create_client("my_supabase_url", "my_supabase_key")
vector_store = SupabaseVectorStore.from_documents(
docs,
embeddings,
client=supabase_client,
table_name="documents",
query_name="match_documents",
)
To load from an existing table:
.. code-block:: python
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.vectorstores import SupabaseVectorStore
from supabase.client import create_client
embeddings = OpenAIEmbeddings()
supabase_client = create_client("my_supabase_url", "my_supabase_key")
vector_store = SupabaseVectorStore(
client=supabase_client,
embedding=embeddings,
table_name="documents",
query_name="match_documents",
)
"""
def __init__(
self,
client: supabase.client.Client,
embedding: Embeddings,
table_name: str,
query_name: Union[str, None] = None,
) -> None:
"""Initialize with supabase client."""
try:
import supabase # noqa: F401
except ImportError:
raise ImportError(
"Could not import supabase python package. "
"Please install it with `pip install supabase`."
)
self._client = client
self._embedding: Embeddings = embedding
self.table_name = table_name or "documents"
self.query_name = query_name or "match_documents"
@property
def embeddings(self) -> Embeddings:
return self._embedding
def add_texts(
self,
texts: Iterable[str],
metadatas: Optional[List[Dict[Any, Any]]] = None,
ids: Optional[List[str]] = None,
**kwargs: Any,
) -> List[str]:
ids = ids or [str(uuid.uuid4()) for _ in texts]
docs = self._texts_to_documents(texts, metadatas)
vectors = self._embedding.embed_documents(list(texts))
return self.add_vectors(vectors, docs, ids)
@classmethod
def from_texts(
cls: Type["SupabaseVectorStore"],
texts: List[str],
embedding: Embeddings,
metadatas: Optional[List[dict]] = None,
client: Optional[supabase.client.Client] = None,
table_name: Optional[str] = "documents",
query_name: Union[str, None] = "match_documents",
ids: Optional[List[str]] = None,
**kwargs: Any,
) -> "SupabaseVectorStore":
"""Return VectorStore initialized from texts and embeddings."""
if not client:
raise ValueError("Supabase client is required.")
if not table_name:
raise ValueError("Supabase document table_name is required.")
embeddings = embedding.embed_documents(texts)
ids = [str(uuid.uuid4()) for _ in texts]
docs = cls._texts_to_documents(texts, metadatas)
cls._add_vectors(client, table_name, embeddings, docs, ids)
return cls(
client=client,
embedding=embedding,
table_name=table_name,
query_name=query_name,
)
def add_vectors(
self,
vectors: List[List[float]],
documents: List[Document],
ids: List[str],
) -> List[str]:
return self._add_vectors(self._client, self.table_name, vectors, documents, ids)
def similarity_search(
self,
query: str,
k: int = 4,
filter: Optional[Dict[str, Any]] = None,
**kwargs: Any,
) -> List[Document]:
vector = self._embedding.embed_query(query)
return self.similarity_search_by_vector(vector, k=k, filter=filter, **kwargs)
def similarity_search_by_vector(
self,
embedding: List[float],
k: int = 4,
filter: Optional[Dict[str, Any]] = None,
**kwargs: Any,
) -> List[Document]:
result = self.similarity_search_by_vector_with_relevance_scores(
embedding, k=k, filter=filter, **kwargs
)
documents = [doc for doc, _ in result]
return documents
def similarity_search_with_relevance_scores(
self,
query: str,
k: int = 4,
filter: Optional[Dict[str, Any]] = None,
**kwargs: Any,
) -> List[Tuple[Document, float]]:
vector = self._embedding.embed_query(query)
return self.similarity_search_by_vector_with_relevance_scores(
vector, k=k, filter=filter
)
def match_args(
self, query: List[float], filter: Optional[Dict[str, Any]]
) -> Dict[str, Any]:
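        # Build the parameters for the match RPC, adding the metadata filter only when one is provided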
ret: Dict[str, Any] = dict(query_embedding=query)
if filter:
ret["filter"] = filter
return ret
def similarity_search_by_vector_with_relevance_scores(
self,
query: List[float],
k: int,
filter: Optional[Dict[str, Any]] = None,
postgrest_filter: Optional[str] = None,
) -> List[Tuple[Document, float]]:
match_documents_params = self.match_args(query, filter)
query_builder = self._client.rpc(self.query_name, match_documents_params)
if postgrest_filter:
query_builder.params = query_builder.params.set(
"and", f"({postgrest_filter})"
)
query_builder.params = query_builder.params.set("limit", k)
res = query_builder.execute()
match_result = [
(
Document(
metadata=search.get("metadata", {}), # type: ignore
page_content=search.get("content", ""),
),
search.get("similarity", 0.0),
)
for search in res.data
if search.get("content")
]
return match_result
def similarity_search_by_vector_returning_embeddings(
self,
query: List[float],
k: int,
filter: Optional[Dict[str, Any]] = None,
postgrest_filter: Optional[str] = None,
) -> List[Tuple[Document, float, np.ndarray[np.float32, Any]]]:
match_documents_params = self.match_args(query, filter)
query_builder = self._client.rpc(self.query_name, match_documents_params)
if postgrest_filter:
query_builder.params = query_builder.params.set(
"and", f"({postgrest_filter})"
)
query_builder.params = query_builder.params.set("limit", k)
res = query_builder.execute()
match_result = [
(
Document(
metadata=search.get("metadata", {}), # type: ignore
page_content=search.get("content", ""),
),
search.get("similarity", 0.0),
                # Supabase returns a vector type as its string representation (!).
# This is a hack to convert the string to numpy array.
np.fromstring(
search.get("embedding", "").strip("[]"), np.float32, sep=","
),
)
for search in res.data
if search.get("content")
]
return match_result
@staticmethod
def _texts_to_documents(
texts: Iterable[str],
metadatas: Optional[Iterable[Dict[Any, Any]]] = None,
) -> List[Document]:
"""Return list of Documents from list of texts and metadatas."""
if metadatas is None:
metadatas = repeat({})
docs = [
Document(page_content=text, metadata=metadata)
for text, metadata in zip(texts, metadatas)
]
return docs
@staticmethod
def _add_vectors(
client: supabase.client.Client,
table_name: str,
vectors: List[List[float]],
documents: List[Document],
ids: List[str],
) -> List[str]:
"""Add vectors to Supabase table."""
rows: List[Dict[str, Any]] = [
{
"id": ids[idx],
"content": documents[idx].page_content,
"embedding": embedding,
"metadata": documents[idx].metadata, # type: ignore
}
for idx, embedding in enumerate(vectors)
]
# According to the SupabaseVectorStore JS implementation, the best chunk size
# is 500
chunk_size = 500
id_list: List[str] = []
for i in range(0, len(rows), chunk_size):
chunk = rows[i : i + chunk_size]
result = client.from_(table_name).upsert(chunk).execute() # type: ignore
if len(result.data) == 0:
raise Exception("Error inserting: No rows added")
# VectorStore.add_vectors returns ids as strings
ids = [str(i.get("id")) for i in result.data if i.get("id")]
id_list.extend(ids)
return id_list
def max_marginal_relevance_search_by_vector(
self,
embedding: List[float],
k: int = 4,
fetch_k: int = 20,
lambda_mult: float = 0.5,
**kwargs: Any,
) -> List[Document]:
"""Return docs selected using the maximal marginal relevance.
Maximal marginal relevance optimizes for similarity to query AND diversity
among selected documents.
Args:
embedding: Embedding to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
fetch_k: Number of Documents to fetch to pass to MMR algorithm.
lambda_mult: Number between 0 and 1 that determines the degree
of diversity among the results with 0 corresponding
to maximum diversity and 1 to minimum diversity.
Defaults to 0.5.
Returns:
List of Documents selected by maximal marginal relevance.
"""
result = self.similarity_search_by_vector_returning_embeddings(
embedding, fetch_k
)
matched_documents = [doc_tuple[0] for doc_tuple in result]
matched_embeddings = [doc_tuple[2] for doc_tuple in result]
mmr_selected = maximal_marginal_relevance(
np.array([embedding], dtype=np.float32),
matched_embeddings,
k=k,
lambda_mult=lambda_mult,
)
filtered_documents = [matched_documents[i] for i in mmr_selected]
return filtered_documents
def max_marginal_relevance_search(
self,
query: str,
k: int = 4,
fetch_k: int = 20,
lambda_mult: float = 0.5,
**kwargs: Any,
) -> List[Document]:
"""Return docs selected using the maximal marginal relevance.
Maximal marginal relevance optimizes for similarity to query AND diversity
among selected documents.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
fetch_k: Number of Documents to fetch to pass to MMR algorithm.
lambda_mult: Number between 0 and 1 that determines the degree
of diversity among the results with 0 corresponding
to maximum diversity and 1 to minimum diversity.
Defaults to 0.5.
Returns:
List of Documents selected by maximal marginal relevance.
`max_marginal_relevance_search` requires that `query_name` returns matched
embeddings alongside the match documents. The following function
demonstrates how to do this:
```sql
CREATE FUNCTION match_documents_embeddings(query_embedding vector(1536),
match_count int)
RETURNS TABLE(
id uuid,
content text,
metadata jsonb,
embedding vector(1536),
similarity float)
LANGUAGE plpgsql
AS $$
# variable_conflict use_column
BEGIN
RETURN query
SELECT
id,
content,
metadata,
embedding,
1 -(docstore.embedding <=> query_embedding) AS similarity
FROM
docstore
ORDER BY
docstore.embedding <=> query_embedding
LIMIT match_count;
END;
$$;
```
"""
embedding = self._embedding.embed_query(query)
docs = self.max_marginal_relevance_search_by_vector(
embedding, k, fetch_k, lambda_mult=lambda_mult
)
return docs
def delete(self, ids: Optional[List[str]] = None, **kwargs: Any) -> None:
"""Delete by vector IDs.
Args:
ids: List of ids to delete.
"""
if ids is None:
raise ValueError("No ids provided to delete.")
rows: List[Dict[str, Any]] = [
{
"id": id,
}
for id in ids
]
# TODO: Check if this can be done in bulk
for row in rows:
self._client.from_(self.table_name).delete().eq("id", row["id"]).execute()
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~utilities~tavily_search.py | """Util that calls Tavily Search API.
In order to set this up, follow instructions at:
"""
import json
from typing import Dict, List, Optional
import aiohttp
import requests
from langchain.pydantic_v1 import BaseModel, Extra, root_validator
from langchain.utils import get_from_dict_or_env
TAVILY_API_URL = "https://api.tavily.com"
class TavilySearchAPIWrapper(BaseModel):
"""Wrapper for Tavily Search API."""
tavily_api_key: str
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
def _tavily_search_results(
self,
query: str,
max_results: Optional[int] = 5,
search_depth: Optional[str] = "advanced",
include_domains: Optional[List[str]] = [],
exclude_domains: Optional[List[str]] = [],
include_answer: Optional[bool] = False,
include_raw_content: Optional[bool] = False,
include_images: Optional[bool] = False,
) -> List[dict]:
params = {
"api_key": self.tavily_api_key,
"query": query,
"max_results": max_results,
"search_depth": search_depth,
"include_domains": include_domains,
"exclude_domains": exclude_domains,
"include_answer": include_answer,
"include_raw_content": include_raw_content,
"include_images": include_images,
}
response = requests.post(
# type: ignore
f"{TAVILY_API_URL}/search",
json=params,
)
response.raise_for_status()
search_results = response.json()
return self.clean_results(search_results["results"])
@root_validator(pre=True)
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and endpoint exists in environment."""
tavily_api_key = get_from_dict_or_env(
values, "tavily_api_key", "TAVILY_API_KEY"
)
values["tavily_api_key"] = tavily_api_key
return values
def results(
self,
query: str,
max_results: Optional[int] = 5,
search_depth: Optional[str] = "advanced",
include_domains: Optional[List[str]] = [],
exclude_domains: Optional[List[str]] = [],
include_answer: Optional[bool] = False,
include_raw_content: Optional[bool] = False,
include_images: Optional[bool] = False,
) -> List[Dict]:
"""Run query through Tavily Search and return metadata.
Args:
query: The query to search for.
max_results: The maximum number of results to return.
search_depth: The depth of the search. Can be "basic" or "advanced".
include_domains: A list of domains to include in the search.
exclude_domains: A list of domains to exclude from the search.
include_answer: Whether to include the answer in the results.
include_raw_content: Whether to include the raw content in the results.
include_images: Whether to include images in the results.
Returns:
query: The query that was searched for.
follow_up_questions: A list of follow up questions.
response_time: The response time of the query.
answer: The answer to the query.
images: A list of images.
results: A list of dictionaries containing the results:
title: The title of the result.
url: The url of the result.
content: The content of the result.
score: The score of the result.
raw_content: The raw content of the result.
""" # noqa: E501
raw_search_results = self._tavily_search_results(
query,
max_results,
search_depth,
include_domains,
exclude_domains,
include_answer,
include_raw_content,
include_images,
)
return raw_search_results
async def results_async(
self,
query: str,
max_results: Optional[int] = 5,
search_depth: Optional[str] = "advanced",
include_domains: Optional[List[str]] = [],
exclude_domains: Optional[List[str]] = [],
include_answer: Optional[bool] = False,
include_raw_content: Optional[bool] = False,
include_images: Optional[bool] = False,
) -> List[Dict]:
"""Get results from the Tavily Search API asynchronously."""
# Function to perform the API call
async def fetch() -> str:
params = {
"api_key": self.tavily_api_key,
"query": query,
"max_results": max_results,
"search_depth": search_depth,
"include_domains": include_domains,
"exclude_domains": exclude_domains,
"include_answer": include_answer,
"include_raw_content": include_raw_content,
"include_images": include_images,
}
async with aiohttp.ClientSession() as session:
async with session.post(f"{TAVILY_API_URL}/search", json=params) as res:
if res.status == 200:
data = await res.text()
return data
else:
raise Exception(f"Error {res.status}: {res.reason}")
results_json_str = await fetch()
results_json = json.loads(results_json_str)
return self.clean_results(results_json["results"])
def clean_results(self, results: List[Dict]) -> List[Dict]:
"""Clean results from Tavily Search API."""
clean_results = []
for result in results:
clean_results.append(
{
"url": result["url"],
"content": result["content"],
}
)
return clean_results
| [
"content"
] |
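A short usage sketch of the wrapper above; it assumes `TAVILY_API_KEY` is set in the environment, and the query string is only an example.
```python
from langchain.utilities.tavily_search import TavilySearchAPIWrapper

# The API key is read from TAVILY_API_KEY by the root validator.
search = TavilySearchAPIWrapper()
results = search.results("open source vector databases", max_results=3, search_depth="basic")
for item in results:
    # clean_results keeps only "url" and "content" for each hit
    print(item["url"], item["content"][:80])
```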
2024-01-10 | ai-forever/gigachain | libs~experimental~langchain_experimental~autonomous_agents~baby_agi~task_creation.py | from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate
from langchain.schema.language_model import BaseLanguageModel
class TaskCreationChain(LLMChain):
"""Chain generating tasks."""
@classmethod
def from_llm(cls, llm: BaseLanguageModel, verbose: bool = True) -> LLMChain:
"""Get the response parser."""
task_creation_template = (
"Ты - AI, создающий задачи,"
" который использует результат работы исполнительного агента"
" для создания новых задач с следующей целью: {objective},"
" Последняя выполненная задача имеет результат: {result}."
" Этот результат был основан на этом описании задачи: {task_description}."
" Вот незавершенные задачи: {incomplete_tasks}."
" Основываясь на результате, создай новые задачи для выполнения"
" AI системой, которые не пересекаются с незавершенными задачами."
" Верни задачи в виде массива."
)
prompt = PromptTemplate(
template=task_creation_template,
input_variables=[
"result",
"task_description",
"incomplete_tasks",
"objective",
],
)
return cls(prompt=prompt, llm=llm, verbose=verbose)
| [
"Ты - AI, создающий задачи, который использует результат работы исполнительного агента для создания новых задач с следующей целью: {objective}, Последняя выполненная задача имеет результат: {result}. Этот результат был основан на этом описании задачи: {task_description}. Вот незавершенные задачи: {incomplete_tasks}. Основываясь на результате, создай новые задачи для выполнения AI системой, которые не пересекаются с незавершенными задачами. Верни задачи в виде массива.",
"task_description",
"incomplete_tasks"
] |
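A sketch of driving the task-creation chain above; `ChatOpenAI` stands in for whichever `BaseLanguageModel` you actually use, and the objective/result strings are purely illustrative.
```python
from langchain.chat_models import ChatOpenAI
from langchain_experimental.autonomous_agents.baby_agi.task_creation import TaskCreationChain

chain = TaskCreationChain.from_llm(ChatOpenAI(temperature=0))
new_tasks = chain.run(
    objective="prepare a market overview",
    result="collected three relevant sources",
    task_description="find relevant sources",
    incomplete_tasks="draft the report outline",
)
print(new_tasks)  # the prompt asks the model to return the new tasks as an array
```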
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~chains~api~openapi~response_chain.py | """Response parser."""
import json
import re
from typing import Any
from langchain.chains.api.openapi.prompts import RESPONSE_TEMPLATE
from langchain.chains.llm import LLMChain
from langchain.prompts.prompt import PromptTemplate
from langchain.schema import BaseOutputParser
from langchain.schema.language_model import BaseLanguageModel
class APIResponderOutputParser(BaseOutputParser):
"""Parse the response and error tags."""
def _load_json_block(self, serialized_block: str) -> str:
try:
response_content = json.loads(serialized_block, strict=False)
return response_content.get("response", "ERROR parsing response.")
except json.JSONDecodeError:
return "ERROR parsing response."
except:
raise
def parse(self, llm_output: str) -> str:
"""Parse the response and error tags."""
json_match = re.search(r"```json(.*?)```", llm_output, re.DOTALL)
if json_match:
return self._load_json_block(json_match.group(1).strip())
else:
raise ValueError(f"No response found in output: {llm_output}.")
@property
def _type(self) -> str:
return "api_responder"
class APIResponderChain(LLMChain):
"""Get the response parser."""
@classmethod
def from_llm(
cls, llm: BaseLanguageModel, verbose: bool = True, **kwargs: Any
) -> LLMChain:
"""Get the response parser."""
output_parser = APIResponderOutputParser()
prompt = PromptTemplate(
template=RESPONSE_TEMPLATE,
output_parser=output_parser,
input_variables=["response", "instructions"],
)
return cls(prompt=prompt, llm=llm, verbose=verbose, **kwargs)
| [
"instructions",
"response"
] |
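A sketch showing how the responder chain and its output parser fit together; the JSON payload and instructions are made up, and `ChatOpenAI` is only a stand-in model.
```python
from langchain.chat_models import ChatOpenAI
from langchain.chains.api.openapi.response_chain import APIResponderChain

chain = APIResponderChain.from_llm(ChatOpenAI(temperature=0))
# predict_and_parse runs the LLM and then APIResponderOutputParser.parse on its output.
answer = chain.predict_and_parse(
    response='{"id": 42, "status": "shipped"}',
    instructions="Tell the user the current status of their order.",
)
print(answer)
```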
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~document_loaders~datadog_logs.py | from datetime import datetime, timedelta
from typing import List, Optional
from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
class DatadogLogsLoader(BaseLoader):
"""Load `Datadog` logs.
Logs are written into the `page_content` and into the `metadata`.
"""
def __init__(
self,
query: str,
api_key: str,
app_key: str,
from_time: Optional[int] = None,
to_time: Optional[int] = None,
limit: int = 100,
) -> None:
"""Initialize Datadog document loader.
Requirements:
- Must have datadog_api_client installed. Install with `pip install datadog_api_client`.
Args:
query: The query to run in Datadog.
api_key: The Datadog API key.
app_key: The Datadog APP key.
from_time: Optional. The start of the time range to query.
Supports date math and regular timestamps (milliseconds) like '1688732708951'
Defaults to 20 minutes ago.
to_time: Optional. The end of the time range to query.
Supports date math and regular timestamps (milliseconds) like '1688732708951'
Defaults to now.
limit: The maximum number of logs to return.
Defaults to 100.
""" # noqa: E501
try:
from datadog_api_client import Configuration
except ImportError as ex:
raise ImportError(
"Could not import datadog_api_client python package. "
"Please install it with `pip install datadog_api_client`."
) from ex
self.query = query
configuration = Configuration()
configuration.api_key["apiKeyAuth"] = api_key
configuration.api_key["appKeyAuth"] = app_key
self.configuration = configuration
self.from_time = from_time
self.to_time = to_time
self.limit = limit
def parse_log(self, log: dict) -> Document:
"""
Create Document objects from Datadog log items.
"""
attributes = log.get("attributes", {})
metadata = {
"id": log.get("id", ""),
"status": attributes.get("status"),
"service": attributes.get("service", ""),
"tags": attributes.get("tags", []),
"timestamp": attributes.get("timestamp", ""),
}
message = attributes.get("message", "")
inside_attributes = attributes.get("attributes", {})
content_dict = {**inside_attributes, "message": message}
content = ", ".join(f"{k}: {v}" for k, v in content_dict.items())
return Document(page_content=content, metadata=metadata)
def load(self) -> List[Document]:
"""
Get logs from Datadog.
Returns:
A list of Document objects.
- page_content
- metadata
- id
- service
- status
- tags
- timestamp
"""
try:
from datadog_api_client import ApiClient
from datadog_api_client.v2.api.logs_api import LogsApi
from datadog_api_client.v2.model.logs_list_request import LogsListRequest
from datadog_api_client.v2.model.logs_list_request_page import (
LogsListRequestPage,
)
from datadog_api_client.v2.model.logs_query_filter import LogsQueryFilter
from datadog_api_client.v2.model.logs_sort import LogsSort
except ImportError as ex:
raise ImportError(
"Could not import datadog_api_client python package. "
"Please install it with `pip install datadog_api_client`."
) from ex
now = datetime.now()
twenty_minutes_before = now - timedelta(minutes=20)
now_timestamp = int(now.timestamp() * 1000)
twenty_minutes_before_timestamp = int(twenty_minutes_before.timestamp() * 1000)
_from = (
self.from_time
if self.from_time is not None
else twenty_minutes_before_timestamp
)
body = LogsListRequest(
filter=LogsQueryFilter(
query=self.query,
_from=_from,
to=f"{self.to_time if self.to_time is not None else now_timestamp}",
),
sort=LogsSort.TIMESTAMP_ASCENDING,
page=LogsListRequestPage(
limit=self.limit,
),
)
with ApiClient(configuration=self.configuration) as api_client:
api_instance = LogsApi(api_client)
response = api_instance.list_logs(body=body).to_dict()
docs: List[Document] = []
for row in response["data"]:
docs.append(self.parse_log(row))
return docs
| [] |
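A usage sketch for the loader above; the query and credentials are placeholders, and the time range falls back to the last 20 minutes as documented.
```python
from langchain.document_loaders.datadog_logs import DatadogLogsLoader

# Placeholder credentials; from_time/to_time default to the last 20 minutes.
loader = DatadogLogsLoader(
    query="service:payments status:error",
    api_key="<DD_API_KEY>",
    app_key="<DD_APP_KEY>",
    limit=50,
)
docs = loader.load()
print(docs[0].metadata["service"], docs[0].page_content[:100])
```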
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~agents~agent_toolkits~openapi~planner_prompt.py | # flake8: noqa
from langchain.prompts.prompt import PromptTemplate
API_PLANNER_PROMPT = """Ты планировщик, который планирует последовательность вызовов API для помощи в пользовательских запросах к API.
Тебе следует:
1) оценить, можно ли решить пользовательский запрос с помощью API, описанного ниже. Если нет, объясни почему.
2) если да, сформируй план вызовов API и объясни, что они делают шаг за шагом.
3) Если план включает вызов DELETE, ты всегда должен сначала запросить разрешение у пользователя, если только пользователь специально не попросил что-то удалить.
Ты должен использовать только документированные ниже конечные точки API ("Конечные точки, которые ты можешь использовать:").
Ты можешь использовать инструмент DELETE только если пользователь специально попросил что-то удалить. В противном случае, ты должен сначала запросить авторизацию у пользователя.
Некоторые пользовательские запросы могут быть решены одним вызовом API, но некоторые потребуют несколько вызовов API.
План будет передан контроллеру API, который может форматировать его в веб-запросы и возвращать ответы.
----
Вот некоторые примеры:
Фиктивные конечные точки для примеров:
GET /user для получения информации о текущем пользователе
GET /products/search для поиска по продуктам
POST /users/{{id}}/cart для добавления продуктов в корзину пользователя
PATCH /users/{{id}}/cart для обновления корзины пользователя
DELETE /users/{{id}}/cart для удаления корзины пользователя
Пользовательский запрос: расскажи мне шутку
План: Извини, областью этого API является шопинг, а не комедия.
Пользовательский запрос: я хочу купить диван
План: 1. GET /products с параметром запроса для поиска диванов
2. GET /user для поиска id пользователя
3. POST /users/{{id}}/cart для добавления дивана в корзину пользователя
Пользовательский запрос: я хочу добавить лампу в свою корзину
План: 1. GET /products с параметром запроса для поиска ламп
2. GET /user для поиска id пользователя
3. PATCH /users/{{id}}/cart для добавления лампы в корзину пользователя
Пользовательский запрос: я хочу удалить свою корзину
План: 1. GET /user для поиска id пользователя
2. Требуется DELETE. Пользователь указал DELETE или ранее авторизовал? Да, продолжить.
3. DELETE /users/{{id}}/cart для удаления корзины пользователя
Пользовательский запрос: я хочу начать новую корзину
План: 1. GET /user для поиска id пользователя
2. Требуется DELETE. Пользователь указал DELETE или ранее авторизовал? Нет, запросить авторизацию.
3. Ты уверен, что хочешь удалить свою корзину?
----
Вот конечные точки, которые ты можешь использовать. Не ссылайся на любые из конечных точек выше.
{endpoints}
----
Пользовательский запрос: {query}
План:"""
API_PLANNER_TOOL_NAME = "api_planner"
API_PLANNER_TOOL_DESCRIPTION = f"Может быть использован для генерации правильных вызовов API для помощи в пользовательском запросе, например {API_PLANNER_TOOL_NAME}(query). Всегда должен быть вызван перед попыткой вызвать контроллер API."
# Execution.
API_CONTROLLER_PROMPT = """Ты агент, который получает последовательность вызовов API и, имея их документацию, должен выполнить их и вернуть окончательный ответ.
Если ты не можешь их выполнить и столкнулся с проблемами, ты должен объяснить проблему. Если ты не можешь выполнить вызов API, ты можешь повторить вызов API. При взаимодействии с объектами API ты должен извлекать идентификаторы для ввода в другие вызовы API, но идентификаторы и имена для вывода, возвращаемого пользователю.
Вот документация по API:
Базовый URL: {api_url}
Конечные точки:
{api_docs}
Вот инструменты для выполнения запросов к API: {tool_descriptions}
Начиная ниже, ты должен следовать этому формату:
План: план вызовов API для выполнения
Thought: ты всегда должен думать о том, что делать
Action: действие, которое следует предпринять, должно быть одним из инструментов [{tool_names}]
Action Input: ввод для действия
Observation: результат действия
... (это Thought/Action/Action Input/Observation может повторяться N раз)
Thought: я закончил выполнение плана (или, я не могу закончить выполнение плана, не зная некоторой другой информации.)
Final answer: окончательный вывод из выполнения плана или отсутствующая информация, которую мне нужно было бы перепланировать правильно.
Начнем!
План: {input}
Thought:
{agent_scratchpad}
"""
API_CONTROLLER_TOOL_NAME = "api_controller"
API_CONTROLLER_TOOL_DESCRIPTION = f"Может быть использован для выполнения плана вызовов API, например {API_CONTROLLER_TOOL_NAME}(plan)."
# Orchestrate planning + execution.
# The goal is to have an agent at the top-level (e.g. so it can recover from errors and re-plan) while
# keeping planning (and specifically the planning prompt) simple.
API_ORCHESTRATOR_PROMPT = """Ты агент, который помогает с пользовательскими запросами к API, такими как запрос информации или создание ресурсов.
Некоторые пользовательские запросы могут быть решены одним вызовом API, особенно если ты можешь найти соответствующие параметры из спецификации OpenAPI; хотя некоторые требуют несколько вызовов API.
Ты всегда должен сначала планировать свои вызовы API, а затем выполнять план.
Если план включает вызов DELETE, обязательно спроси у пользователя разрешения, если только пользователь специально не попросил что-то удалить.
Ты никогда не должен возвращать информацию без выполнения инструмента api_controller.
Вот инструменты для планирования и выполнения запросов API: {tool_descriptions}
Начиная ниже, ты должен следовать этому формату:
Пользовательский запрос: запрос, с которым пользователь хочет помощи, связанный с API
Thought: ты всегда должен думать о том, что делать
Action: действие, которое следует предпринять, должно быть одним из инструментов [{tool_names}]
Action Input: ввод для действия
Observation: результат действия
... (это Thought/Action/Action Input/Observation может повторяться N раз)
Thought: я закончил выполнение плана и имею информацию, которую пользователь просил, или данные, которые пользователь просил создать
Final answer: окончательный вывод из выполнения плана
Пример:
Пользовательский запрос: можешь ли ты добавить несколько модных вещей в мою корзину покупок.
Thought: я должен сначала спланировать вызовы API.
Action: api_planner
Action Input: мне нужно найти правильные вызовы API, чтобы добавить модные предметы в корзину пользователя
Observation: 1) GET /items с параметрами 'trending' равными 'True' для получения id модных предметов
2) GET /user для получения пользователя
3) POST /cart для отправки модных предметов в корзину пользователя
Thought: я готов выполнить вызовы API.
Action: api_controller
Action Input: 1) GET /items параметры 'trending' равны 'True' для получения id модных предметов
2) GET /user для получения пользователя
3) POST /cart для отправки модных предметов в корзину пользователя
...
Начнем!
Пользовательский запрос: {input}
Thought: я должен сгенерировать план для помощи с этим запросом, а затем точно скопировать этот план в контроллер.
{agent_scratchpad}"""
REQUESTS_GET_TOOL_DESCRIPTION = """Используй это для получения контента с веб-сайта.
Ввод для инструмента должен быть строкой json с 3 ключами: "url", "params" и "output_instructions".
Значение "url" должно быть строкой.
Значение "params" должно быть словарем необходимых и доступных параметров из спецификации OpenAPI, связанных с конечной точкой.
Если параметры не нужны или не доступны, оставь его пустым.
Значение "output_instructions" должно быть инструкциями о том, какую информацию извлечь из ответа,
например id(s) для ресурса(ов), который получает запрос GET.
"""
PARSING_GET_PROMPT = PromptTemplate(
template="""Вот ответ API:\n\n{response}\n\n====
Твоя задача - извлечь некоторую информацию в соответствии с этими инструкциями: {instructions}
При работе с объектами API ты обычно должен использовать id вместо имен.
Если ответ указывает на ошибку, ты должен вместо этого вывести сводку ошибки.
Вывод:""",
input_variables=["response", "instructions"],
)
REQUESTS_POST_TOOL_DESCRIPTION = """Используй это, когда хочешь POST на веб-сайт.
Ввод для инструмента должен быть строкой json с 3 ключами: "url", "data" и "output_instructions".
Значение "url" должно быть строкой.
Значение "data" должно быть словарем пар ключ-значение, которые ты хочешь POST на url.
Значение "output_instructions" должно быть инструкциями о том, какую информацию извлечь из ответа, например id(s) для ресурса(ов), который создает запрос POST.
Всегда используй двойные кавычки для строк в строке json."""
PARSING_POST_PROMPT = PromptTemplate(
template="""Вот ответ API:\n\n{response}\n\n====
Твоя задача - извлечь некоторую информацию в соответствии с этими инструкциями: {instructions}
При работе с объектами API ты обычно должен использовать id вместо имен. Не возвращай никаких id или имен, которых нет в ответе.
Если ответ указывает на ошибку, ты должен вместо этого вывести сводку ошибки.
Вывод:""",
input_variables=["response", "instructions"],
)
REQUESTS_PATCH_TOOL_DESCRIPTION = """Используй это, когда хочешь PATCH контент на веб-сайте.
Ввод для инструмента должен быть строкой json с 3 ключами: "url", "data" и "output_instructions".
Значение "url" должно быть строкой.
Значение "data" должно быть словарем пар ключ-значение параметров тела, доступных в спецификации OpenAPI, которыми ты хочешь PATCH контент на url.
Значение "output_instructions" должно быть инструкциями о том, какую информацию извлечь из ответа, например id(s) для ресурса(ов), который создает запрос PATCH.
Всегда используй двойные кавычки для строк в строке json."""
PARSING_PATCH_PROMPT = PromptTemplate(
template="""Вот ответ API:\n\n{response}\n\n====
Твоя задача - извлечь некоторую информацию в соответствии с этими инструкциями: {instructions}
При работе с объектами API ты обычно должен использовать id вместо имен. Не возвращай никаких id или имен, которых нет в ответе.
Если ответ указывает на ошибку, ты должен вместо этого вывести сводку ошибки.
Вывод:""",
input_variables=["response", "instructions"],
)
REQUESTS_DELETE_TOOL_DESCRIPTION = """ИСПОЛЬЗУЙ ЭТОТ ИНСТРУМЕНТ ТОЛЬКО КОГДА ПОЛЬЗОВАТЕЛЬ СПЕЦИАЛЬНО ПОПРОСИЛ УДАЛИТЬ КОНТЕНТ С ВЕБ-САЙТА.
Ввод для инструмента должен быть строкой json с 2 ключами: "url" и "output_instructions".
Значение "url" должно быть строкой.
Значение "output_instructions" должно быть инструкциями о том, какую информацию извлечь из ответа, например id(s) для ресурса(ов), который создает запрос DELETE.
Всегда используй двойные кавычки для строк в строке json.
ИСПОЛЬЗУЙ ЭТОТ ИНСТРУМЕНТ ТОЛЬКО ЕСЛИ ПОЛЬЗОВАТЕЛЬ СПЕЦИАЛЬНО ПОПРОСИЛ ЧТО-ТО УДАЛИТЬ."""
REQUESTS_PUT_TOOL_DESCRIPTION = """Используйте это, когда вы хотите выполнить PUT-запрос на веб-сайт.
Ввод для инструмента должен быть строкой json с 3 ключами: "url", "data" и "output_instructions".
Значение "url" должно быть строкой.
Значение "data" должно быть словарем пар ключ-значение, которые вы хотите отправить на url с использованием PUT.
Значение "output_instructions" должно содержать инструкции о том, какую информацию извлечь из ответа, например, идентификаторы ресурсов, созданных с помощью PUT-запроса.
Всегда используйте двойные кавычки для строк в строке json."""
PARSING_PUT_PROMPT = PromptTemplate(
template="""Вот ответ API:\n\n{response}\n\n====
Ваша задача - извлечь некоторую информацию согласно этим инструкциям: {instructions}
Работая с объектами API, вы обычно должны использовать идентификаторы вместо имен. Не возвращайте идентификаторы или имена, которых нет в ответе.
Если ответ указывает на ошибку, вместо этого вы должны вывести резюме ошибки.
Вывод:""",
input_variables=["response", "instructions"],
)
PARSING_DELETE_PROMPT = PromptTemplate(
template="""Вот ответ API:\n\n{response}\n\n====
Твоя задача - извлечь некоторую информацию в соответствии с этими инструкциями: {instructions}
При работе с объектами API ты обычно должен использовать id вместо имен. Не возвращай никаких id или имен, которых нет в ответе.
Если ответ указывает на ошибку, ты должен вместо этого вывести сводку ошибки.
Вывод:""",
input_variables=["response", "instructions"],
)
| [
"Ты планировщик, который планирует последовательность вызовов API для помощи в пользовательских запросах к API.\n\nТебе следует:\n1) оценить, можно ли решить пользовательский запрос с помощью API, описанного ниже. Если нет, объясни почему.\n2) если да, сформируй план вызовов API и объясни, что они делают шаг за шагом.\n3) Если план включает вызов DELETE, ты всегда должен сначала запросить разрешение у пользователя, если только пользователь специально не попросил что-то удалить.\n\nТы должен использовать только документированные ниже конечные точки API (\"Конечные точки, которые ты можешь использовать:\").\nТы можешь использовать инструмент DELETE только если пользователь специально попросил что-то удалить. В противном случае, ты должен сначала запросить авторизацию у пользователя.\nНекоторые пользовательские запросы могут быть решены одним вызовом API, но некоторые потребуют несколько вызовов API.\nПлан будет передан контроллеру API, который может форматировать его в веб-запросы и возвращать ответы.\n\n----\n\nВот некоторые примеры:\n\nФиктивные конечные точки для примеров:\nGET /user для получения информации о текущем пользователе\nGET /products/search для поиска по продуктам\nPOST /users/{{id}}/cart для добавления продуктов в корзину пользователя\nPATCH /users/{{id}}/cart для обновления корзины пользователя\nDELETE /users/{{id}}/cart для удаления корзины пользователя\n\nПользовательский запрос: расскажи мне шутку\nПлан: Извини, областью этого API является шопинг, а не комедия.\n\nПользовательский запрос: я хочу купить диван\nПлан: 1. GET /products с параметром запроса для поиска диванов\n2. GET /user для поиска id пользователя\n3. POST /users/{{id}}/cart для добавления дивана в корзину пользователя\n\nПользовательский запрос: я хочу добавить лампу в свою корзину\nПлан: 1. GET /products с параметром запроса для поиска ламп\n2. GET /user для поиска id пользователя\n3. PATCH /users/{{id}}/cart для добавления лампы в корзину пользователя\n\nПользовательский запрос: я хочу удалить свою корзину\nПлан: 1. GET /user для поиска id пользователя\n2. Требуется DELETE. Пользователь указал DELETE или ранее авторизовал? Да, продолжить.\n3. DELETE /users/{{id}}/cart для удаления корзины пользователя\n\nПользовательский запрос: я хочу начать новую корзину\nПлан: 1. GET /user для поиска id пользователя\n2. Требуется DELETE. Пользователь указал DELETE или ранее авторизовал? Нет, запросить авторизацию.\n3. Ты уверен, что хочешь удалить свою корзину? \n----\n\nВот конечные точки, которые ты можешь использовать. Не ссылайся на любые из конечных точек выше.\n\n{endpoints}\n\n----\n\nПользовательский запрос: {query}\nПлан:",
"instructions",
"Вот ответ API:\n\n{response}\n\n====\nТвоя задача - извлечь некоторую информацию в соответствии с этими инструкциями: {instructions}\nПри работе с объектами API ты обычно должен использовать id вместо имен.\nЕсли ответ указывает на ошибку, ты должен вместо этого вывести сводку ошибки.\n\nВывод:",
"Ты агент, который помогает с пользовательскими запросами к API, такими как запрос информации или создание ресурсов.\nНекоторые пользовательские запросы могут быть решены одним вызовом API, особенно если ты можешь найти соответствующие параметры из спецификации OpenAPI; хотя некоторые требуют несколько вызовов API.\nТы всегда должен сначала планировать свои вызовы API, а затем выполнять план.\nЕсли план включает вызов DELETE, обязательно спроси у пользователя разрешения, если только пользователь специально не попросил что-то удалить.\nТы никогда не должен возвращать информацию без выполнения инструмента api_controller.\n\n\nВот инструменты для планирования и выполнения запросов API: {tool_descriptions}\n\n\nНачиная ниже, ты должен следовать этому формату:\n\nПользовательский запрос: запрос, с которым пользователь хочет помощи, связанный с API\nThought: ты всегда должен думать о том, что делать\nAction: действие, которое следует предпринять, должно быть одним из инструментов [{tool_names}]\nAction Input: ввод для действия\nObservation: результат действия\n... (это Thought/Action/Action Input/Observation может повторяться N раз)\nThought: я закончил выполнение плана и имею информацию, которую пользователь просил, или данные, которые пользователь просил создать\nFinal answer: окончательный вывод из выполнения плана\n\n\nПример:\nПользовательский запрос: можешь ли ты добавить несколько модных вещей в мою корзину покупок.\nThought: я должен сначала спланировать вызовы API.\nAction: api_planner\nAction Input: мне нужно найти правильные вызовы API, чтобы добавить модные предметы в корзину пользователя\nObservation: 1) GET /items с параметрами 'trending' равными 'True' для получения id модных предметов\n2) GET /user для получения пользователя\n3) POST /cart для отправки модных предметов в корзину пользователя\nThought: я готов выполнить вызовы API.\nAction: api_controller\nAction Input: 1) GET /items параметры 'trending' равны 'True' для получения id модных предметов\n2) GET /user для получения пользователя\n3) POST /cart для отправки модных предметов в корзину пользователя\n...\n\nНачнем!\n\nПользовательский запрос: {input}\nThought: я должен сгенерировать план для помощи с этим запросом, а затем точно скопировать этот план в контроллер.\n{agent_scratchpad}",
"response",
"Ты агент, который получает последовательность вызовов API и, имея их документацию, должен выполнить их и вернуть окончательный ответ.\nЕсли ты не можешь их выполнить и столкнулся с проблемами, ты должен объяснить проблему. Если ты не можешь выполнить вызов API, ты можешь повторить вызов API. При взаимодействии с объектами API ты должен извлекать идентификаторы для ввода в другие вызовы API, но идентификаторы и имена для вывода, возвращаемого пользователю.\n\n\nВот документация по API:\nБазовый URL: {api_url}\nКонечные точки:\n{api_docs}\n\n\nВот инструменты для выполнения запросов к API: {tool_descriptions}\n\n\nНачиная ниже, ты должен следовать этому формату:\n\nПлан: план вызовов API для выполнения\nThought: ты всегда должен думать о том, что делать\nAction: действие, которое следует предпринять, должно быть одним из инструментов [{tool_names}]\nAction Input: ввод для действия\nObservation: результат действия\n... (это Thought/Action/Action Input/Observation может повторяться N раз)\nThought: я закончил выполнение плана (или, я не могу закончить выполнение плана, не зная некоторой другой информации.)\nFinal answer: окончательный вывод из выполнения плана или отсутствующая информация, которую мне нужно было бы перепланировать правильно.\n\n\nНачнем!\n\nПлан: {input}\nThought:\n{agent_scratchpad}\n",
"Вот ответ API:\n\n{response}\n\n====\nВаша задача - извлечь некоторую информацию согласно этим инструкциям: {instructions}\nРаботая с объектами API, вы обычно должны использовать идентификаторы вместо имен. Не возвращайте идентификаторы или имена, которых нет в ответе.\nЕсли ответ указывает на ошибку, вместо этого вы должны вывести резюме ошибки.\n\nВывод:",
"Вот ответ API:\n\n{response}\n\n====\nТвоя задача - извлечь некоторую информацию в соответствии с этими инструкциями: {instructions}\nПри работе с объектами API ты обычно должен использовать id вместо имен. Не возвращай никаких id или имен, которых нет в ответе.\nЕсли ответ указывает на ошибку, ты должен вместо этого вывести сводку ошибки.\n\nВывод:"
] |
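These templates are ordinary `PromptTemplate`s, so they can be rendered directly; the sketch below formats the GET-parsing prompt with a made-up API response and instructions.
```python
from langchain.agents.agent_toolkits.openapi.planner_prompt import PARSING_GET_PROMPT

rendered = PARSING_GET_PROMPT.format(
    response='{"items": [{"id": 7, "label": "lamp"}]}',
    instructions="Extract the ids of the returned products.",
)
print(rendered)
```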
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~callbacks~flyte_callback.py | """FlyteKit callback handler."""
from __future__ import annotations
import logging
from copy import deepcopy
from typing import TYPE_CHECKING, Any, Dict, List, Tuple
from langchain.callbacks.base import BaseCallbackHandler
from langchain.callbacks.utils import (
BaseMetadataCallbackHandler,
flatten_dict,
import_pandas,
import_spacy,
import_textstat,
)
from langchain.schema import AgentAction, AgentFinish, LLMResult
if TYPE_CHECKING:
import flytekit
from flytekitplugins.deck import renderer
logger = logging.getLogger(__name__)
def import_flytekit() -> Tuple[flytekit, renderer]:
"""Import flytekit and flytekitplugins-deck-standard."""
try:
import flytekit # noqa: F401
from flytekitplugins.deck import renderer # noqa: F401
except ImportError:
raise ImportError(
"To use the flyte callback manager you need"
"to have the `flytekit` and `flytekitplugins-deck-standard`"
"packages installed. Please install them with `pip install flytekit`"
"and `pip install flytekitplugins-deck-standard`."
)
return flytekit, renderer
def analyze_text(
text: str,
nlp: Any = None,
textstat: Any = None,
) -> dict:
"""Analyze text using textstat and spacy.
Parameters:
text (str): The text to analyze.
nlp (spacy.lang): The spacy language model to use for visualization.
Returns:
(dict): A dictionary containing the complexity metrics and visualization
files serialized to HTML string.
"""
resp: Dict[str, Any] = {}
if textstat is not None:
text_complexity_metrics = {
"flesch_reading_ease": textstat.flesch_reading_ease(text),
"flesch_kincaid_grade": textstat.flesch_kincaid_grade(text),
"smog_index": textstat.smog_index(text),
"coleman_liau_index": textstat.coleman_liau_index(text),
"automated_readability_index": textstat.automated_readability_index(text),
"dale_chall_readability_score": textstat.dale_chall_readability_score(text),
"difficult_words": textstat.difficult_words(text),
"linsear_write_formula": textstat.linsear_write_formula(text),
"gunning_fog": textstat.gunning_fog(text),
"fernandez_huerta": textstat.fernandez_huerta(text),
"szigriszt_pazos": textstat.szigriszt_pazos(text),
"gutierrez_polini": textstat.gutierrez_polini(text),
"crawford": textstat.crawford(text),
"gulpease_index": textstat.gulpease_index(text),
"osman": textstat.osman(text),
}
resp.update({"text_complexity_metrics": text_complexity_metrics})
resp.update(text_complexity_metrics)
if nlp is not None:
spacy = import_spacy()
doc = nlp(text)
dep_out = spacy.displacy.render( # type: ignore
doc, style="dep", jupyter=False, page=True
)
ent_out = spacy.displacy.render( # type: ignore
doc, style="ent", jupyter=False, page=True
)
text_visualizations = {
"dependency_tree": dep_out,
"entities": ent_out,
}
resp.update(text_visualizations)
return resp
class FlyteCallbackHandler(BaseMetadataCallbackHandler, BaseCallbackHandler):
"""This callback handler that is used within a Flyte task."""
def __init__(self) -> None:
"""Initialize callback handler."""
flytekit, renderer = import_flytekit()
self.pandas = import_pandas()
self.textstat = None
try:
self.textstat = import_textstat()
except ImportError:
logger.warning(
"Textstat library is not installed. \
It may result in the inability to log \
certain metrics that can be captured with Textstat."
)
spacy = None
try:
spacy = import_spacy()
except ImportError:
logger.warning(
"Spacy library is not installed. \
It may result in the inability to log \
certain metrics that can be captured with Spacy."
)
super().__init__()
self.nlp = None
if spacy:
try:
self.nlp = spacy.load("en_core_web_sm")
except OSError:
logger.warning(
"FlyteCallbackHandler uses spacy's en_core_web_sm model"
" for certain metrics. To download,"
" run the following command in your terminal:"
" `python -m spacy download en_core_web_sm`"
)
self.table_renderer = renderer.TableRenderer
self.markdown_renderer = renderer.MarkdownRenderer
self.deck = flytekit.Deck(
"LangChain Metrics",
self.markdown_renderer().to_html("## LangChain Metrics"),
)
def on_llm_start(
self, serialized: Dict[str, Any], prompts: List[str], **kwargs: Any
) -> None:
"""Run when LLM starts."""
self.step += 1
self.llm_starts += 1
self.starts += 1
resp: Dict[str, Any] = {}
resp.update({"action": "on_llm_start"})
resp.update(flatten_dict(serialized))
resp.update(self.get_custom_callback_meta())
prompt_responses = []
for prompt in prompts:
prompt_responses.append(prompt)
resp.update({"prompts": prompt_responses})
self.deck.append(self.markdown_renderer().to_html("### LLM Start"))
self.deck.append(
self.table_renderer().to_html(self.pandas.DataFrame([resp])) + "\n"
)
def on_llm_new_token(self, token: str, **kwargs: Any) -> None:
"""Run when LLM generates a new token."""
def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None:
"""Run when LLM ends running."""
self.step += 1
self.llm_ends += 1
self.ends += 1
resp: Dict[str, Any] = {}
resp.update({"action": "on_llm_end"})
resp.update(flatten_dict(response.llm_output or {}))
resp.update(self.get_custom_callback_meta())
self.deck.append(self.markdown_renderer().to_html("### LLM End"))
self.deck.append(self.table_renderer().to_html(self.pandas.DataFrame([resp])))
for generations in response.generations:
for generation in generations:
generation_resp = deepcopy(resp)
generation_resp.update(flatten_dict(generation.dict()))
if self.nlp or self.textstat:
generation_resp.update(
analyze_text(
generation.text, nlp=self.nlp, textstat=self.textstat
)
)
complexity_metrics: Dict[str, float] = generation_resp.pop("text_complexity_metrics") # type: ignore # noqa: E501
self.deck.append(
self.markdown_renderer().to_html("#### Text Complexity Metrics")
)
self.deck.append(
self.table_renderer().to_html(
self.pandas.DataFrame([complexity_metrics])
)
+ "\n"
)
dependency_tree = generation_resp["dependency_tree"]
self.deck.append(
self.markdown_renderer().to_html("#### Dependency Tree")
)
self.deck.append(dependency_tree)
entities = generation_resp["entities"]
self.deck.append(self.markdown_renderer().to_html("#### Entities"))
self.deck.append(entities)
else:
self.deck.append(
self.markdown_renderer().to_html("#### Generated Response")
)
self.deck.append(self.markdown_renderer().to_html(generation.text))
def on_llm_error(self, error: BaseException, **kwargs: Any) -> None:
"""Run when LLM errors."""
self.step += 1
self.errors += 1
def on_chain_start(
self, serialized: Dict[str, Any], inputs: Dict[str, Any], **kwargs: Any
) -> None:
"""Run when chain starts running."""
self.step += 1
self.chain_starts += 1
self.starts += 1
resp: Dict[str, Any] = {}
resp.update({"action": "on_chain_start"})
resp.update(flatten_dict(serialized))
resp.update(self.get_custom_callback_meta())
chain_input = ",".join([f"{k}={v}" for k, v in inputs.items()])
input_resp = deepcopy(resp)
input_resp["inputs"] = chain_input
self.deck.append(self.markdown_renderer().to_html("### Chain Start"))
self.deck.append(
self.table_renderer().to_html(self.pandas.DataFrame([input_resp])) + "\n"
)
def on_chain_end(self, outputs: Dict[str, Any], **kwargs: Any) -> None:
"""Run when chain ends running."""
self.step += 1
self.chain_ends += 1
self.ends += 1
resp: Dict[str, Any] = {}
chain_output = ",".join([f"{k}={v}" for k, v in outputs.items()])
resp.update({"action": "on_chain_end", "outputs": chain_output})
resp.update(self.get_custom_callback_meta())
self.deck.append(self.markdown_renderer().to_html("### Chain End"))
self.deck.append(
self.table_renderer().to_html(self.pandas.DataFrame([resp])) + "\n"
)
def on_chain_error(self, error: BaseException, **kwargs: Any) -> None:
"""Run when chain errors."""
self.step += 1
self.errors += 1
def on_tool_start(
self, serialized: Dict[str, Any], input_str: str, **kwargs: Any
) -> None:
"""Run when tool starts running."""
self.step += 1
self.tool_starts += 1
self.starts += 1
resp: Dict[str, Any] = {}
resp.update({"action": "on_tool_start", "input_str": input_str})
resp.update(flatten_dict(serialized))
resp.update(self.get_custom_callback_meta())
self.deck.append(self.markdown_renderer().to_html("### Tool Start"))
self.deck.append(
self.table_renderer().to_html(self.pandas.DataFrame([resp])) + "\n"
)
def on_tool_end(self, output: str, **kwargs: Any) -> None:
"""Run when tool ends running."""
self.step += 1
self.tool_ends += 1
self.ends += 1
resp: Dict[str, Any] = {}
resp.update({"action": "on_tool_end", "output": output})
resp.update(self.get_custom_callback_meta())
self.deck.append(self.markdown_renderer().to_html("### Tool End"))
self.deck.append(
self.table_renderer().to_html(self.pandas.DataFrame([resp])) + "\n"
)
def on_tool_error(self, error: BaseException, **kwargs: Any) -> None:
"""Run when tool errors."""
self.step += 1
self.errors += 1
def on_text(self, text: str, **kwargs: Any) -> None:
"""
Run when agent is ending.
"""
self.step += 1
self.text_ctr += 1
resp: Dict[str, Any] = {}
resp.update({"action": "on_text", "text": text})
resp.update(self.get_custom_callback_meta())
self.deck.append(self.markdown_renderer().to_html("### On Text"))
self.deck.append(
self.table_renderer().to_html(self.pandas.DataFrame([resp])) + "\n"
)
def on_agent_finish(self, finish: AgentFinish, **kwargs: Any) -> None:
"""Run when agent ends running."""
self.step += 1
self.agent_ends += 1
self.ends += 1
resp: Dict[str, Any] = {}
resp.update(
{
"action": "on_agent_finish",
"output": finish.return_values["output"],
"log": finish.log,
}
)
resp.update(self.get_custom_callback_meta())
self.deck.append(self.markdown_renderer().to_html("### Agent Finish"))
self.deck.append(
self.table_renderer().to_html(self.pandas.DataFrame([resp])) + "\n"
)
def on_agent_action(self, action: AgentAction, **kwargs: Any) -> Any:
"""Run on agent action."""
self.step += 1
self.tool_starts += 1
self.starts += 1
resp: Dict[str, Any] = {}
resp.update(
{
"action": "on_agent_action",
"tool": action.tool,
"tool_input": action.tool_input,
"log": action.log,
}
)
resp.update(self.get_custom_callback_meta())
self.deck.append(self.markdown_renderer().to_html("### Agent Action"))
self.deck.append(
self.table_renderer().to_html(self.pandas.DataFrame([resp])) + "\n"
)
| [
"[]"
] |
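A sketch of wiring the handler into a chain; it assumes the code runs inside a Flyte task with `flytekit` and `flytekitplugins-deck-standard` installed, and uses `ChatOpenAI` purely as an example model.
```python
from langchain.callbacks.flyte_callback import FlyteCallbackHandler
from langchain.chains import LLMChain
from langchain.chat_models import ChatOpenAI
from langchain.prompts import PromptTemplate

handler = FlyteCallbackHandler()
llm = ChatOpenAI(temperature=0, callbacks=[handler])
chain = LLMChain(
    llm=llm,
    prompt=PromptTemplate.from_template("Summarize in one sentence: {text}"),
    callbacks=[handler],
)
# Each start/end event appends a table or markdown section to the "LangChain Metrics" deck.
chain.run(text="Callbacks can render run metrics directly into Flyte Decks.")
```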
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~llms~beam.py | import base64
import json
import logging
import subprocess
import textwrap
import time
from typing import Any, Dict, List, Mapping, Optional
import requests
from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.llms.base import LLM
from langchain.pydantic_v1 import Extra, Field, root_validator
from langchain.utils import get_from_dict_or_env
logger = logging.getLogger(__name__)
DEFAULT_NUM_TRIES = 10
DEFAULT_SLEEP_TIME = 4
class Beam(LLM):
"""Beam API for gpt2 large language model.
To use, you should have the ``beam-sdk`` python package installed,
and the environment variable ``BEAM_CLIENT_ID`` set with your client id
and ``BEAM_CLIENT_SECRET`` set with your client secret. Information on how
to get this is available here: https://docs.beam.cloud/account/api-keys.
The wrapper can then be called as follows, where the name, cpu, memory, gpu,
python version, and python packages can be updated accordingly. Once deployed,
the instance can be called.
Example:
.. code-block:: python
llm = Beam(model_name="gpt2",
name="langchain-gpt2",
cpu=8,
memory="32Gi",
gpu="A10G",
python_version="python3.8",
python_packages=[
"diffusers[torch]>=0.10",
"transformers",
"torch",
"pillow",
"accelerate",
"safetensors",
"xformers",],
max_length=50)
llm._deploy()
call_result = llm._call(input)
"""
model_name: str = ""
name: str = ""
cpu: str = ""
memory: str = ""
gpu: str = ""
python_version: str = ""
python_packages: List[str] = []
max_length: str = ""
url: str = ""
"""model endpoint to use"""
model_kwargs: Dict[str, Any] = Field(default_factory=dict)
"""Holds any model parameters valid for `create` call not
explicitly specified."""
beam_client_id: str = ""
beam_client_secret: str = ""
app_id: Optional[str] = None
class Config:
"""Configuration for this pydantic config."""
extra = Extra.forbid
@root_validator(pre=True)
def build_extra(cls, values: Dict[str, Any]) -> Dict[str, Any]:
"""Build extra kwargs from additional params that were passed in."""
all_required_field_names = {field.alias for field in cls.__fields__.values()}
extra = values.get("model_kwargs", {})
for field_name in list(values):
if field_name not in all_required_field_names:
if field_name in extra:
raise ValueError(f"Found {field_name} supplied twice.")
logger.warning(
f"""{field_name} was transferred to model_kwargs.
Please confirm that {field_name} is what you intended."""
)
extra[field_name] = values.pop(field_name)
values["model_kwargs"] = extra
return values
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""
beam_client_id = get_from_dict_or_env(
values, "beam_client_id", "BEAM_CLIENT_ID"
)
beam_client_secret = get_from_dict_or_env(
values, "beam_client_secret", "BEAM_CLIENT_SECRET"
)
values["beam_client_id"] = beam_client_id
values["beam_client_secret"] = beam_client_secret
return values
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
return {
"model_name": self.model_name,
"name": self.name,
"cpu": self.cpu,
"memory": self.memory,
"gpu": self.gpu,
"python_version": self.python_version,
"python_packages": self.python_packages,
"max_length": self.max_length,
"model_kwargs": self.model_kwargs,
}
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "beam"
def app_creation(self) -> None:
"""Creates a Python file which will contain your Beam app definition."""
script = textwrap.dedent(
"""\
import beam
# The environment your code will run on
app = beam.App(
name="{name}",
cpu={cpu},
memory="{memory}",
gpu="{gpu}",
python_version="{python_version}",
python_packages={python_packages},
)
app.Trigger.RestAPI(
inputs={{"prompt": beam.Types.String(), "max_length": beam.Types.String()}},
outputs={{"text": beam.Types.String()}},
handler="run.py:beam_langchain",
)
"""
)
script_name = "app.py"
with open(script_name, "w") as file:
file.write(
script.format(
name=self.name,
cpu=self.cpu,
memory=self.memory,
gpu=self.gpu,
python_version=self.python_version,
python_packages=self.python_packages,
)
)
def run_creation(self) -> None:
"""Creates a Python file which will be deployed on beam."""
script = textwrap.dedent(
"""
import os
import transformers
from transformers import GPT2LMHeadModel, GPT2Tokenizer
model_name = "{model_name}"
def beam_langchain(**inputs):
prompt = inputs["prompt"]
length = inputs["max_length"]
tokenizer = GPT2Tokenizer.from_pretrained(model_name)
model = GPT2LMHeadModel.from_pretrained(model_name)
encodedPrompt = tokenizer.encode(prompt, return_tensors='pt')
outputs = model.generate(encodedPrompt, max_length=int(length),
do_sample=True, pad_token_id=tokenizer.eos_token_id)
output = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(output)
return {{"text": output}}
"""
)
script_name = "run.py"
with open(script_name, "w") as file:
file.write(script.format(model_name=self.model_name))
def _deploy(self) -> str:
"""Call to Beam."""
try:
import beam # type: ignore
if beam.__path__ == "":
raise ImportError
except ImportError:
raise ImportError(
"Could not import beam python package. "
"Please install it with `curl "
"https://raw.githubusercontent.com/slai-labs"
"/get-beam/main/get-beam.sh -sSfL | sh`."
)
self.app_creation()
self.run_creation()
process = subprocess.run(
"beam deploy app.py", shell=True, capture_output=True, text=True
)
if process.returncode == 0:
output = process.stdout
logger.info(output)
lines = output.split("\n")
for line in lines:
if line.startswith(" i Send requests to: https://apps.beam.cloud/"):
self.app_id = line.split("/")[-1]
self.url = line.split(":")[1].strip()
return self.app_id
raise ValueError(
f"""Failed to retrieve the appID from the deployment output.
Deployment output: {output}"""
)
else:
raise ValueError(f"Deployment failed. Error: {process.stderr}")
@property
def authorization(self) -> str:
if self.beam_client_id:
credential_str = self.beam_client_id + ":" + self.beam_client_secret
else:
credential_str = self.beam_client_secret
return base64.b64encode(credential_str.encode()).decode()
def _call(
self,
prompt: str,
stop: Optional[list] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
"""Call to Beam."""
url = "https://apps.beam.cloud/" + self.app_id if self.app_id else self.url
payload = {"prompt": prompt, "max_length": self.max_length}
payload.update(kwargs)
headers = {
"Accept": "*/*",
"Accept-Encoding": "gzip, deflate",
"Authorization": "Basic " + self.authorization,
"Connection": "keep-alive",
"Content-Type": "application/json",
}
for _ in range(DEFAULT_NUM_TRIES):
request = requests.post(url, headers=headers, data=json.dumps(payload))
if request.status_code == 200:
return request.json()["text"]
time.sleep(DEFAULT_SLEEP_TIME)
logger.warning("Unable to successfully call model.")
return ""
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~document_loaders~parsers~grobid.py | import logging
from typing import Dict, Iterator, List, Union
import requests
from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseBlobParser
from langchain.document_loaders.blob_loaders import Blob
logger = logging.getLogger(__name__)
class ServerUnavailableException(Exception):
"""Exception raised when the Grobid server is unavailable."""
pass
class GrobidParser(BaseBlobParser):
"""Load article `PDF` files using `Grobid`."""
def __init__(
self,
segment_sentences: bool,
grobid_server: str = "http://localhost:8070/api/processFulltextDocument",
) -> None:
self.segment_sentences = segment_sentences
self.grobid_server = grobid_server
try:
requests.get(grobid_server)
except requests.exceptions.RequestException:
logger.error(
"GROBID server does not appear up and running, \
please ensure Grobid is installed and the server is running"
)
raise ServerUnavailableException
def process_xml(
self, file_path: str, xml_data: str, segment_sentences: bool
) -> Iterator[Document]:
"""Process the XML file from Grobin."""
try:
from bs4 import BeautifulSoup
except ImportError:
raise ImportError(
"`bs4` package not found, please install it with " "`pip install bs4`"
)
soup = BeautifulSoup(xml_data, "xml")
sections = soup.find_all("div")
title = soup.find_all("title")[0].text
chunks = []
for section in sections:
sect = section.find("head")
if sect is not None:
for i, paragraph in enumerate(section.find_all("p")):
chunk_bboxes = []
paragraph_text = []
# do not reuse the outer paragraph index `i` here, so "para" below keeps the paragraph number
for sentence in paragraph.find_all("s"):
paragraph_text.append(sentence.text)
sbboxes = []
for bbox in sentence.get("coords").split(";"):
box = bbox.split(",")
sbboxes.append(
{
"page": box[0],
"x": box[1],
"y": box[2],
"h": box[3],
"w": box[4],
}
)
chunk_bboxes.append(sbboxes)
if segment_sentences is True:
fpage, lpage = sbboxes[0]["page"], sbboxes[-1]["page"]
sentence_dict = {
"text": sentence.text,
"para": str(i),
"bboxes": [sbboxes],
"section_title": sect.text,
"section_number": sect.get("n"),
"pages": (fpage, lpage),
}
chunks.append(sentence_dict)
if segment_sentences is not True:
fpage, lpage = (
chunk_bboxes[0][0]["page"],
chunk_bboxes[-1][-1]["page"],
)
paragraph_dict = {
"text": "".join(paragraph_text),
"para": str(i),
"bboxes": chunk_bboxes,
"section_title": sect.text,
"section_number": sect.get("n"),
"pages": (fpage, lpage),
}
chunks.append(paragraph_dict)
yield from [
Document(
page_content=chunk["text"],
metadata=dict(
{
"text": str(chunk["text"]),
"para": str(chunk["para"]),
"bboxes": str(chunk["bboxes"]),
"pages": str(chunk["pages"]),
"section_title": str(chunk["section_title"]),
"section_number": str(chunk["section_number"]),
"paper_title": str(title),
"file_path": str(file_path),
}
),
)
for chunk in chunks
]
def lazy_parse(self, blob: Blob) -> Iterator[Document]:
file_path = blob.source
if file_path is None:
raise ValueError("blob.source cannot be None.")
pdf = open(file_path, "rb")
files = {"input": (file_path, pdf, "application/pdf", {"Expires": "0"})}
try:
data: Dict[str, Union[str, List[str]]] = {}
for param in ["generateIDs", "consolidateHeader", "segmentSentences"]:
data[param] = "1"
data["teiCoordinates"] = ["head", "s"]
files = files or {}
r = requests.request(
"POST",
self.grobid_server,
headers=None,
params=None,
files=files,
data=data,
timeout=60,
)
xml_data = r.text
except requests.exceptions.ReadTimeout:
logger.error("GROBID server timed out. Return None.")
xml_data = None
if xml_data is None:
return iter([])
else:
return self.process_xml(file_path, xml_data, self.segment_sentences)
| [] |
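A sketch of using the parser through `GenericLoader`; it assumes a Grobid server is already running on the default localhost port, and `./papers/` is a placeholder directory of PDFs.
```python
from langchain.document_loaders.generic import GenericLoader
from langchain.document_loaders.parsers.grobid import GrobidParser

loader = GenericLoader.from_filesystem(
    "./papers/",
    glob="*",
    suffixes=[".pdf"],
    parser=GrobidParser(segment_sentences=False),
)
docs = loader.load()
# Each Document carries section title, paragraph number, and bounding boxes in metadata.
print(docs[0].metadata["section_title"], docs[0].page_content[:80])
```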
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~llms~javelin_ai_gateway.py | from __future__ import annotations
from typing import Any, Dict, List, Mapping, Optional
from langchain.callbacks.manager import (
AsyncCallbackManagerForLLMRun,
CallbackManagerForLLMRun,
)
from langchain.llms.base import LLM
from langchain.pydantic_v1 import BaseModel, Extra
# Ignoring type because below is valid pydantic code
# Unexpected keyword argument "extra" for "__init_subclass__" of "object"
class Params(BaseModel, extra=Extra.allow): # type: ignore[call-arg]
"""Parameters for the Javelin AI Gateway LLM."""
temperature: float = 0.0
stop: Optional[List[str]] = None
max_tokens: Optional[int] = None
class JavelinAIGateway(LLM):
"""Javelin AI Gateway LLMs.
To use, you should have the ``javelin_sdk`` python package installed.
For more information, see https://docs.getjavelin.io
Example:
.. code-block:: python
from langchain.llms import JavelinAIGateway
completions = JavelinAIGateway(
gateway_uri="<your-javelin-ai-gateway-uri>",
route="<your-javelin-ai-gateway-completions-route>",
params={
"temperature": 0.1
}
)
"""
route: str
"""The route to use for the Javelin AI Gateway API."""
client: Optional[Any] = None
"""The Javelin AI Gateway client."""
gateway_uri: Optional[str] = None
"""The URI of the Javelin AI Gateway API."""
params: Optional[Params] = None
"""Parameters for the Javelin AI Gateway API."""
javelin_api_key: Optional[str] = None
"""The API key for the Javelin AI Gateway API."""
def __init__(self, **kwargs: Any):
try:
from javelin_sdk import (
JavelinClient,
UnauthorizedError,
)
except ImportError:
raise ImportError(
"Could not import javelin_sdk python package. "
"Please install it with `pip install javelin_sdk`."
)
super().__init__(**kwargs)
if self.gateway_uri:
try:
self.client = JavelinClient(
base_url=self.gateway_uri, api_key=self.javelin_api_key
)
except UnauthorizedError as e:
raise ValueError("Javelin: Incorrect API Key.") from e
@property
def _default_params(self) -> Dict[str, Any]:
"""Get the default parameters for calling Javelin AI Gateway API."""
params: Dict[str, Any] = {
"gateway_uri": self.gateway_uri,
"route": self.route,
"javelin_api_key": self.javelin_api_key,
**(self.params.dict() if self.params else {}),
}
return params
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
return self._default_params
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
"""Call the Javelin AI Gateway API."""
data: Dict[str, Any] = {
"prompt": prompt,
**(self.params.dict() if self.params else {}),
}
if s := (stop or (self.params.stop if self.params else None)):
data["stop"] = s
if self.client is not None:
resp = self.client.query_route(self.route, query_body=data)
else:
raise ValueError("Javelin client is not initialized.")
resp_dict = resp.dict()
try:
return resp_dict["llm_response"]["choices"][0]["text"]
except KeyError:
return ""
async def _acall(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
"""Call async the Javelin AI Gateway API."""
data: Dict[str, Any] = {
"prompt": prompt,
**(self.params.dict() if self.params else {}),
}
if s := (stop or (self.params.stop if self.params else None)):
data["stop"] = s
if self.client is not None:
resp = await self.client.aquery_route(self.route, query_body=data)
else:
raise ValueError("Javelin client is not initialized.")
resp_dict = resp.dict()
try:
return resp_dict["llm_response"]["choices"][0]["text"]
except KeyError:
return ""
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "javelin-ai-gateway"
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~tools~edenai~image_objectdetection.py | from __future__ import annotations
import logging
from typing import Optional
from langchain.callbacks.manager import CallbackManagerForToolRun
from langchain.tools.edenai.edenai_base_tool import EdenaiTool
logger = logging.getLogger(__name__)
class EdenAiObjectDetectionTool(EdenaiTool):
"""Tool that queries the Eden AI Object detection API.
for api reference check edenai documentation:
https://docs.edenai.co/reference/image_object_detection_create.
To use, you should have
the environment variable ``EDENAI_API_KEY`` set with your API token.
You can find your token here: https://app.edenai.run/admin/account/settings
"""
name = "edenai_object_detection"
description = (
"A wrapper around edenai Services Object Detection . "
"""Useful for when you have to do an to identify and locate
(with bounding boxes) objects in an image """
"Input should be the string url of the image to identify."
)
show_positions: bool = False
feature = "image"
subfeature = "object_detection"
def _parse_json(self, json_data: dict) -> str:
result = []
label_info = []
for found_obj in json_data["items"]:
label_str = f"{found_obj['label']} - Confidence {found_obj['confidence']}"
x_min = found_obj.get("x_min")
x_max = found_obj.get("x_max")
y_min = found_obj.get("y_min")
y_max = found_obj.get("y_max")
if self.show_positions and all(
[x_min, x_max, y_min, y_max]
): # some providers don't return positions
label_str += f""",at the position x_min: {x_min}, x_max: {x_max},
y_min: {y_min}, y_max: {y_max}"""
label_info.append(label_str)
result.append("\n".join(label_info))
return "\n\n".join(result)
def _parse_response(self, response: list) -> str:
if len(response) == 1:
result = self._parse_json(response[0])
else:
for entry in response:
if entry.get("provider") == "eden-ai":
result = self._parse_json(entry)
return result
def _run(
self,
query: str,
run_manager: Optional[CallbackManagerForToolRun] = None,
) -> str:
"""Use the tool."""
query_params = {"file_url": query, "attributes_as_list": False}
return self._call_eden_ai(query_params)
| [] |
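A usage sketch for the tool above; it assumes `EDENAI_API_KEY` is set in the environment, the image URL is a placeholder, and the `providers` field is supplied by the `EdenaiTool` base class.
```python
from langchain.tools.edenai.image_objectdetection import EdenAiObjectDetectionTool

tool = EdenAiObjectDetectionTool(providers=["google"], show_positions=True)
# Input is the URL of the image to analyse; output lists labels, confidences and positions.
print(tool.run("https://example.com/street-scene.jpg"))
```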
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~hub.py | """Interface with the LangChain Hub."""
from __future__ import annotations
from typing import TYPE_CHECKING, Any, Optional
from langchain.load.dump import dumps
from langchain.load.load import loads
if TYPE_CHECKING:
from langchainhub import Client
def _get_client(api_url: Optional[str] = None, api_key: Optional[str] = None) -> Client:
try:
from langchainhub import Client
except ImportError as e:
raise ImportError(
"Could not import langchainhub, please install with `pip install "
"langchainhub`."
) from e
# Client logic will also attempt to load URL/key from environment variables
return Client(api_url, api_key=api_key)
def push(
repo_full_name: str,
object: Any,
*,
api_url: Optional[str] = None,
api_key: Optional[str] = None,
parent_commit_hash: Optional[str] = "latest",
new_repo_is_public: bool = True,
new_repo_description: str = "",
) -> str:
"""
Pushes an object to the hub and returns the URL it can be viewed at in a browser.
:param repo_full_name: The full name of the repo to push to in the format of
`owner/repo`.
:param object: The LangChain to serialize and push to the hub.
:param api_url: The URL of the LangChain Hub API. Defaults to the hosted API service
if you have an api key set, or a localhost instance if not.
:param api_key: The API key to use to authenticate with the LangChain Hub API.
:param parent_commit_hash: The commit hash of the parent commit to push to. Defaults
to the latest commit automatically.
:param new_repo_is_public: Whether the repo should be public. Defaults to
True (Public by default).
:param new_repo_description: The description of the repo. Defaults to an empty
string.
"""
client = _get_client(api_url=api_url, api_key=api_key)
manifest_json = dumps(object)
message = client.push(
repo_full_name,
manifest_json,
parent_commit_hash=parent_commit_hash,
new_repo_is_public=new_repo_is_public,
new_repo_description=new_repo_description,
)
return message
def pull(
owner_repo_commit: str,
*,
api_url: Optional[str] = None,
api_key: Optional[str] = None,
) -> Any:
"""
Pulls an object from the hub and returns it as a LangChain object.
:param owner_repo_commit: The full name of the repo to pull from in the format of
`owner/repo:commit_hash`.
:param api_url: The URL of the LangChain Hub API. Defaults to the hosted API service
if you have an api key set, or a localhost instance if not.
:param api_key: The API key to use to authenticate with the LangChain Hub API.
"""
client = _get_client(api_url=api_url, api_key=api_key)
resp: str = client.pull(owner_repo_commit)
return loads(resp)
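# Usage sketch (illustrative, not part of the original module): pushing a prompt to the
# hub and pulling it back. The repo handle is a placeholder, and a hub API key in the
# environment (or a local hub instance) is assumed.
if __name__ == "__main__":
    from langchain.prompts import ChatPromptTemplate
    prompt = ChatPromptTemplate.from_template("Tell me a joke about {topic}")
    print(push("my-handle/joke-prompt", prompt))  # URL of the pushed commit
    pulled = pull("my-handle/joke-prompt")
    print(pulled.format_messages(topic="bears"))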
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~document_loaders~brave_search.py | from typing import Iterator, List, Optional
from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
from langchain.utilities.brave_search import BraveSearchWrapper
class BraveSearchLoader(BaseLoader):
"""Load with `Brave Search` engine."""
def __init__(self, query: str, api_key: str, search_kwargs: Optional[dict] = None):
"""Initializes the BraveLoader.
Args:
query: The query to search for.
api_key: The API key to use.
search_kwargs: The search kwargs to use.
"""
self.query = query
self.api_key = api_key
self.search_kwargs = search_kwargs or {}
def load(self) -> List[Document]:
brave_client = BraveSearchWrapper(
api_key=self.api_key,
search_kwargs=self.search_kwargs,
)
return brave_client.download_documents(self.query)
def lazy_load(self) -> Iterator[Document]:
for doc in self.load():
yield doc
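# Usage sketch (illustrative, not part of the original module): the API key is a
# placeholder, and ``count`` is assumed to be a valid Brave Search parameter.
if __name__ == "__main__":
    loader = BraveSearchLoader(
        query="obama middle name",
        api_key="BRAVE-API-KEY-PLACEHOLDER",
        search_kwargs={"count": 3},
    )
    for doc in loader.lazy_load():
        print(doc.metadata.get("title"), "->", doc.page_content[:80])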
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~llms~nlpcloud.py | from typing import Any, Dict, List, Mapping, Optional
from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.llms.base import LLM
from langchain.pydantic_v1 import Extra, root_validator
from langchain.utils import get_from_dict_or_env
class NLPCloud(LLM):
"""NLPCloud large language models.
To use, you should have the ``nlpcloud`` python package installed, and the
environment variable ``NLPCLOUD_API_KEY`` set with your API key.
Example:
.. code-block:: python
from langchain.llms import NLPCloud
nlpcloud = NLPCloud(model="finetuned-gpt-neox-20b")
"""
client: Any #: :meta private:
model_name: str = "finetuned-gpt-neox-20b"
"""Model name to use."""
gpu: bool = True
"""Whether to use a GPU or not"""
lang: str = "en"
"""Language to use (multilingual addon)"""
temperature: float = 0.7
"""What sampling temperature to use."""
max_length: int = 256
"""The maximum number of tokens to generate in the completion."""
length_no_input: bool = True
"""Whether min_length and max_length should include the length of the input."""
remove_input: bool = True
"""Remove input text from API response"""
remove_end_sequence: bool = True
"""Whether or not to remove the end sequence token."""
bad_words: List[str] = []
"""List of tokens not allowed to be generated."""
top_p: int = 1
"""Total probability mass of tokens to consider at each step."""
top_k: int = 50
"""The number of highest probability tokens to keep for top-k filtering."""
repetition_penalty: float = 1.0
"""Penalizes repeated tokens. 1.0 means no penalty."""
num_beams: int = 1
"""Number of beams for beam search."""
num_return_sequences: int = 1
"""How many completions to generate for each prompt."""
nlpcloud_api_key: Optional[str] = None
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""
nlpcloud_api_key = get_from_dict_or_env(
values, "nlpcloud_api_key", "NLPCLOUD_API_KEY"
)
try:
import nlpcloud
values["client"] = nlpcloud.Client(
values["model_name"],
nlpcloud_api_key,
gpu=values["gpu"],
lang=values["lang"],
)
except ImportError:
raise ImportError(
"Could not import nlpcloud python package. "
"Please install it with `pip install nlpcloud`."
)
return values
@property
def _default_params(self) -> Mapping[str, Any]:
"""Get the default parameters for calling NLPCloud API."""
return {
"temperature": self.temperature,
"max_length": self.max_length,
"length_no_input": self.length_no_input,
"remove_input": self.remove_input,
"remove_end_sequence": self.remove_end_sequence,
"bad_words": self.bad_words,
"top_p": self.top_p,
"top_k": self.top_k,
"repetition_penalty": self.repetition_penalty,
"num_beams": self.num_beams,
"num_return_sequences": self.num_return_sequences,
}
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
return {
**{"model_name": self.model_name},
**{"gpu": self.gpu},
**{"lang": self.lang},
**self._default_params,
}
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "nlpcloud"
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
"""Call out to NLPCloud's create endpoint.
Args:
prompt: The prompt to pass into the model.
stop: Not supported by this interface (pass in init method)
Returns:
The string generated by the model.
Example:
.. code-block:: python
response = nlpcloud("Tell me a joke.")
"""
if stop and len(stop) > 1:
raise ValueError(
"NLPCloud only supports a single stop sequence per generation."
"Pass in a list of length 1."
)
elif stop and len(stop) == 1:
end_sequence = stop[0]
else:
end_sequence = None
params = {**self._default_params, **kwargs}
response = self.client.generation(prompt, end_sequence=end_sequence, **params)
return response["generated_text"]
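# Usage sketch (illustrative, not part of the original module): requires the
# ``nlpcloud`` package and the NLPCLOUD_API_KEY environment variable.
if __name__ == "__main__":
    llm = NLPCloud(model_name="finetuned-gpt-neox-20b", temperature=0.5, max_length=64)
    print(llm("Tell me a joke."))
    # A single stop sequence may be passed at call time:
    print(llm("List three colors:", stop=["\n\n"]))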
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~retrievers~databerry.py | from typing import List, Optional
import aiohttp
import requests
from langchain.callbacks.manager import (
AsyncCallbackManagerForRetrieverRun,
CallbackManagerForRetrieverRun,
)
from langchain.schema import BaseRetriever, Document
class DataberryRetriever(BaseRetriever):
"""`Databerry API` retriever."""
datastore_url: str
top_k: Optional[int]
api_key: Optional[str]
def _get_relevant_documents(
self, query: str, *, run_manager: CallbackManagerForRetrieverRun
) -> List[Document]:
response = requests.post(
self.datastore_url,
json={
"query": query,
**({"topK": self.top_k} if self.top_k is not None else {}),
},
headers={
"Content-Type": "application/json",
**(
{"Authorization": f"Bearer {self.api_key}"}
if self.api_key is not None
else {}
),
},
)
data = response.json()
return [
Document(
page_content=r["text"],
metadata={"source": r["source"], "score": r["score"]},
)
for r in data["results"]
]
async def _aget_relevant_documents(
self, query: str, *, run_manager: AsyncCallbackManagerForRetrieverRun
) -> List[Document]:
async with aiohttp.ClientSession() as session:
async with session.request(
"POST",
self.datastore_url,
json={
"query": query,
**({"topK": self.top_k} if self.top_k is not None else {}),
},
headers={
"Content-Type": "application/json",
**(
{"Authorization": f"Bearer {self.api_key}"}
if self.api_key is not None
else {}
),
},
) as response:
data = await response.json()
return [
Document(
page_content=r["text"],
metadata={"source": r["source"], "score": r["score"]},
)
for r in data["results"]
]
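# Usage sketch (illustrative, not part of the original module): the datastore URL is a
# placeholder for a real Databerry datastore endpoint; the API key is optional.
if __name__ == "__main__":
    retriever = DataberryRetriever(
        datastore_url="https://example.databerry.ai/query",  # placeholder
        top_k=3,
        api_key=None,
    )
    for doc in retriever.get_relevant_documents("What is Databerry?"):
        print(doc.metadata["score"], doc.page_content[:80])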
| [] |
2024-01-10 | ai-forever/gigachain | libs~experimental~langchain_experimental~generative_agents~memory.py | import logging
import re
from datetime import datetime
from typing import Any, Dict, List, Optional
from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate
from langchain.retrievers import TimeWeightedVectorStoreRetriever
from langchain.schema import BaseMemory, Document
from langchain.schema.language_model import BaseLanguageModel
from langchain.utils import mock_now
logger = logging.getLogger(__name__)
class GenerativeAgentMemory(BaseMemory):
"""Memory for the generative agent."""
llm: BaseLanguageModel
"""The core language model."""
memory_retriever: TimeWeightedVectorStoreRetriever
"""The retriever to fetch related memories."""
verbose: bool = False
reflection_threshold: Optional[float] = None
"""When aggregate_importance exceeds reflection_threshold, stop to reflect."""
current_plan: List[str] = []
"""The current plan of the agent."""
# A weight of 0.15 makes this less important than it
# would be otherwise, relative to salience and time
importance_weight: float = 0.15
"""How much weight to assign the memory importance."""
aggregate_importance: float = 0.0 # : :meta private:
"""Track the sum of the 'importance' of recent memories.
Triggers reflection when it reaches reflection_threshold."""
max_tokens_limit: int = 1200 # : :meta private:
# input keys
queries_key: str = "queries"
most_recent_memories_token_key: str = "recent_memories_token"
add_memory_key: str = "add_memory"
# output keys
relevant_memories_key: str = "relevant_memories"
relevant_memories_simple_key: str = "relevant_memories_simple"
most_recent_memories_key: str = "most_recent_memories"
now_key: str = "now"
reflecting: bool = False
def chain(self, prompt: PromptTemplate) -> LLMChain:
return LLMChain(llm=self.llm, prompt=prompt, verbose=self.verbose)
@staticmethod
def _parse_list(text: str) -> List[str]:
"""Parse a newline-separated string into a list of strings."""
lines = re.split(r"\n", text.strip())
lines = [line for line in lines if line.strip()] # remove empty lines
return [re.sub(r"^\s*\d+\.\s*", "", line).strip() for line in lines]
def _get_topics_of_reflection(self, last_k: int = 50) -> List[str]:
"""Return the 3 most salient high-level questions about recent observations."""
prompt = PromptTemplate.from_template(
"{observations}\n\n"
"Учитывая только приведенную выше информацию, какие 3 наиболее важных "
"вопроса высокого уровня мы можем ответить о субъектах в утверждениях?\n"
"Укажи каждый вопрос на новой строке."
)
observations = self.memory_retriever.memory_stream[-last_k:]
observation_str = "\n".join(
[self._format_memory_detail(o) for o in observations]
)
result = self.chain(prompt).run(observations=observation_str)
return self._parse_list(result)
def _get_insights_on_topic(
self, topic: str, now: Optional[datetime] = None
) -> List[str]:
"""Generate 'insights' on a topic of reflection, based on pertinent memories."""
prompt = PromptTemplate.from_template(
"Утверждения, относящиеся к: '{topic}'\n"
"---\n"
"{related_statements}\n"
"---\n"
"Какие 5 высокоуровневых новых выводов"
" ты можешь сделать из приведенных выше утверждений, "
"которые актуальны для ответа на следующий вопрос?\n"
"Не включай выводы, которые не относятся к вопросу.\n"
"Не повторяй выводы, которые уже были сделаны.\n\n"
"Question: {topic}\n\n"
"(пример формата: вывод (из-за 1, 5, 3))\n"
)
related_memories = self.fetch_memories(topic, now=now)
related_statements = "\n".join(
[
self._format_memory_detail(memory, prefix=f"{i+1}. ")
for i, memory in enumerate(related_memories)
]
)
result = self.chain(prompt).run(
topic=topic, related_statements=related_statements
)
# TODO: Parse the connections between memories and insights
return self._parse_list(result)
def pause_to_reflect(self, now: Optional[datetime] = None) -> List[str]:
"""Reflect on recent observations and generate 'insights'."""
if self.verbose:
logger.info("Character is reflecting")
new_insights = []
topics = self._get_topics_of_reflection()
for topic in topics:
insights = self._get_insights_on_topic(topic, now=now)
for insight in insights:
self.add_memory(insight, now=now)
new_insights.extend(insights)
return new_insights
def _score_memory_importance(self, memory_content: str) -> float:
"""Score the absolute importance of the given memory."""
prompt = PromptTemplate.from_template(
"На шкале от 1 до 10, где 1 - это совершенно обыденное"
+ " (например, чистка зубов, застилание кровати) и 10 -"
+ " это чрезвычайно важное (например, расставание, поступление в колледж),"
+ " оцени вероятную важность следующего"
+ " фрагмента памяти. Ответь одним числом."
+ "\nПамять: {memory_content}"
+ "\nОценка: "
)
score = self.chain(prompt).run(memory_content=memory_content).strip()
if self.verbose:
logger.info(f"Importance score: {score}")
match = re.search(r"^\D*(\d+)", score)
if match:
return (float(match.group(1)) / 10) * self.importance_weight
else:
return 0.0
def _score_memories_importance(self, memory_content: str) -> List[float]:
"""Score the absolute importance of the given memory."""
prompt = PromptTemplate.from_template(
"На шкале от 1 до 10, где 1 - это совершенно обыденное"
+ " (например, чистка зубов, застилание кровати) и 10 -"
+ " это чрезвычайно важное (например, расставание, поступление в колледж),"
+ " оцени вероятную важность следующего фрагмента памяти."
+ " Всегда отвечай только списком чисел."
+ " Если дана только одна память, все равно отвечай списком."
+ " Память разделена точками с запятой (;)"
+ "\Память: {memory_content}"
+ "\nОценка: "
)
scores = self.chain(prompt).run(memory_content=memory_content).strip()
if self.verbose:
logger.info(f"Importance scores: {scores}")
# Split into list of strings and convert to floats
scores_list = [float(x) for x in scores.split(";")]
return scores_list
def add_memories(
self, memory_content: str, now: Optional[datetime] = None
) -> List[str]:
"""Add an observations or memories to the agent's memory."""
importance_scores = self._score_memories_importance(memory_content)
self.aggregate_importance += max(importance_scores)
memory_list = memory_content.split(";")
documents = []
for i in range(len(memory_list)):
documents.append(
Document(
page_content=memory_list[i],
metadata={"importance": importance_scores[i]},
)
)
result = self.memory_retriever.add_documents(documents, current_time=now)
# After an agent has processed a certain amount of memories (as measured by
# aggregate importance), it is time to reflect on recent events to add
# more synthesized memories to the agent's memory stream.
if (
self.reflection_threshold is not None
and self.aggregate_importance > self.reflection_threshold
and not self.reflecting
):
self.reflecting = True
self.pause_to_reflect(now=now)
# Hack to clear the importance from reflection
self.aggregate_importance = 0.0
self.reflecting = False
return result
def add_memory(
self, memory_content: str, now: Optional[datetime] = None
) -> List[str]:
"""Add an observation or memory to the agent's memory."""
importance_score = self._score_memory_importance(memory_content)
self.aggregate_importance += importance_score
document = Document(
page_content=memory_content, metadata={"importance": importance_score}
)
result = self.memory_retriever.add_documents([document], current_time=now)
# After an agent has processed a certain amount of memories (as measured by
# aggregate importance), it is time to reflect on recent events to add
# more synthesized memories to the agent's memory stream.
if (
self.reflection_threshold is not None
and self.aggregate_importance > self.reflection_threshold
and not self.reflecting
):
self.reflecting = True
self.pause_to_reflect(now=now)
# Hack to clear the importance from reflection
self.aggregate_importance = 0.0
self.reflecting = False
return result
def fetch_memories(
self, observation: str, now: Optional[datetime] = None
) -> List[Document]:
"""Fetch related memories."""
if now is not None:
with mock_now(now):
return self.memory_retriever.get_relevant_documents(observation)
else:
return self.memory_retriever.get_relevant_documents(observation)
def format_memories_detail(self, relevant_memories: List[Document]) -> str:
content = []
for mem in relevant_memories:
content.append(self._format_memory_detail(mem, prefix="- "))
return "\n".join([f"{mem}" for mem in content])
def _format_memory_detail(self, memory: Document, prefix: str = "") -> str:
created_time = memory.metadata["created_at"].strftime("%B %d, %Y, %I:%M %p")
return f"{prefix}[{created_time}] {memory.page_content.strip()}"
def format_memories_simple(self, relevant_memories: List[Document]) -> str:
return "; ".join([f"{mem.page_content}" for mem in relevant_memories])
def _get_memories_until_limit(self, consumed_tokens: int) -> str:
"""Reduce the number of tokens in the documents."""
result = []
for doc in self.memory_retriever.memory_stream[::-1]:
if consumed_tokens >= self.max_tokens_limit:
break
consumed_tokens += self.llm.get_num_tokens(doc.page_content)
if consumed_tokens < self.max_tokens_limit:
result.append(doc)
return self.format_memories_simple(result)
@property
def memory_variables(self) -> List[str]:
"""Input keys this memory class will load dynamically."""
return []
def load_memory_variables(self, inputs: Dict[str, Any]) -> Dict[str, str]:
"""Return key-value pairs given the text input to the chain."""
queries = inputs.get(self.queries_key)
now = inputs.get(self.now_key)
if queries is not None:
relevant_memories = [
mem for query in queries for mem in self.fetch_memories(query, now=now)
]
return {
self.relevant_memories_key: self.format_memories_detail(
relevant_memories
),
self.relevant_memories_simple_key: self.format_memories_simple(
relevant_memories
),
}
most_recent_memories_token = inputs.get(self.most_recent_memories_token_key)
if most_recent_memories_token is not None:
return {
self.most_recent_memories_key: self._get_memories_until_limit(
most_recent_memories_token
)
}
return {}
def save_context(self, inputs: Dict[str, Any], outputs: Dict[str, Any]) -> None:
"""Save the context of this model run to memory."""
# TODO: fix the save memory key
mem = outputs.get(self.add_memory_key)
now = outputs.get(self.now_key)
if mem:
self.add_memory(mem, now=now)
def clear(self) -> None:
"""Clear memory contents."""
# TODO
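# Usage sketch (illustrative, not part of the original module): wiring the memory to a
# TimeWeightedVectorStoreRetriever. FAISS and OpenAIEmbeddings are used here only as an
# example backend and require ``faiss-cpu`` plus an OPENAI_API_KEY; any vector store and
# LLM would do, and the relevance function below is one common choice, not a requirement.
if __name__ == "__main__":
    import math
    import faiss
    from langchain.docstore import InMemoryDocstore
    from langchain.embeddings import OpenAIEmbeddings
    from langchain.llms import OpenAI
    from langchain.vectorstores import FAISS
    def relevance_score_fn(score: float) -> float:
        # Map an L2 distance on normalized embeddings to a [0, 1] similarity.
        return 1.0 - score / math.sqrt(2)
    embeddings = OpenAIEmbeddings()
    index = faiss.IndexFlatL2(1536)  # embedding size of text-embedding-ada-002
    vectorstore = FAISS(
        embeddings.embed_query,
        index,
        InMemoryDocstore({}),
        {},
        relevance_score_fn=relevance_score_fn,
    )
    retriever = TimeWeightedVectorStoreRetriever(
        vectorstore=vectorstore, other_score_keys=["importance"], k=5
    )
    memory = GenerativeAgentMemory(
        llm=OpenAI(temperature=0.7),
        memory_retriever=retriever,
        reflection_threshold=8.0,
        verbose=True,
    )
    memory.add_memory("Went to the market and bought fresh tomatoes.")
    print(memory.load_memory_variables({"queries": ["food"]}))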
| [
" Всегда отвечай только списком чисел.",
"{related_statements}\n",
" фрагмента памяти. Ответь одним числом.",
" Если дана только одна память, все равно отвечай списком.",
"Утверждения, относящиеся к: '{topic}'\n",
" (например, чистка зубов, застилание кровати) и 10 -",
" ты можешь сделать из приведенных выше утверждений, ",
"Не повторяй выводы, которые уже были сделаны.\n\n",
" это чрезвычайно важное (например, расставание, поступление в колледж),",
"Какие 5 высокоуровневых новых выводов",
"\\Память: {memory_content}",
"На шкале от 1 до 10, где 1 - это совершенно обыденное",
"вопроса высокого уровня мы можем ответить о субъектах в утверждениях?\n",
"{observations}\n\nУчитывая только приведенную выше информацию, какие 3 наиболее важных вопроса высокого уровня мы можем ответить о субъектах в утверждениях?\nУкажи каждый вопрос на новой строке.",
"Не включай выводы, которые не относятся к вопросу.\n",
"{observations}\n\n",
"Утверждения, относящиеся к: '{topic}'\n---\n{related_statements}\n---\nКакие 5 высокоуровневых новых выводов ты можешь сделать из приведенных выше утверждений, которые актуальны для ответа на следующий вопрос?\nНе включай выводы, которые не относятся к вопросу.\nНе повторяй выводы, которые уже были сделаны.\n\nQuestion: {topic}\n\n(пример формата: вывод (из-за 1, 5, 3))\n",
" оцени вероятную важность следующего",
" оцени вероятную важность следующего фрагмента памяти.",
"Укажи каждый вопрос на новой строке.",
"(пример формата: вывод (из-за 1, 5, 3))\n",
"\nПамять: {memory_content}",
"которые актуальны для ответа на следующий вопрос?\n",
"---\n",
"На шкале от 1 до 10, где 1 - это совершенно обыденное (например, чистка зубов, застилание кровати) и 10 - это чрезвычайно важное (например, расставание, поступление в колледж), оцени вероятную важность следующего фрагмента памяти. Ответь одним числом.\nПамять: {memory_content}\nОценка: ",
"Учитывая только приведенную выше информацию, какие 3 наиболее важных ",
" Память разделена точками с запятой (;)",
"\nОценка: ",
"Question: {topic}\n\n",
"На шкале от 1 до 10, где 1 - это совершенно обыденное (например, чистка зубов, застилание кровати) и 10 - это чрезвычайно важное (например, расставание, поступление в колледж), оцени вероятную важность следующего фрагмента памяти. Всегда отвечай только списком чисел. Если дана только одна память, все равно отвечай списком. Память разделена точками с запятой (;)\\Память: {memory_content}\nОценка: "
] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~document_loaders~twitter.py | from __future__ import annotations
from typing import TYPE_CHECKING, Any, Dict, Iterable, List, Optional, Sequence, Union
from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
if TYPE_CHECKING:
import tweepy
from tweepy import OAuth2BearerHandler, OAuthHandler
def _dependable_tweepy_import() -> tweepy:
try:
import tweepy
except ImportError:
raise ImportError(
"tweepy package not found, please install it with `pip install tweepy`"
)
return tweepy
class TwitterTweetLoader(BaseLoader):
"""Load `Twitter` tweets.
Read tweets of the user's Twitter handle.
First you need to go to
    `https://developer.twitter.com/en/docs/twitter-api/getting-started/getting-access-to-the-twitter-api`
to get your token. And create a v2 version of the app.
"""
def __init__(
self,
auth_handler: Union[OAuthHandler, OAuth2BearerHandler],
twitter_users: Sequence[str],
number_tweets: Optional[int] = 100,
):
self.auth = auth_handler
self.twitter_users = twitter_users
self.number_tweets = number_tweets
def load(self) -> List[Document]:
"""Load tweets."""
tweepy = _dependable_tweepy_import()
api = tweepy.API(self.auth, parser=tweepy.parsers.JSONParser())
results: List[Document] = []
for username in self.twitter_users:
tweets = api.user_timeline(screen_name=username, count=self.number_tweets)
user = api.get_user(screen_name=username)
docs = self._format_tweets(tweets, user)
results.extend(docs)
return results
def _format_tweets(
self, tweets: List[Dict[str, Any]], user_info: dict
) -> Iterable[Document]:
"""Format tweets into a string."""
for tweet in tweets:
metadata = {
"created_at": tweet["created_at"],
"user_info": user_info,
}
yield Document(
page_content=tweet["text"],
metadata=metadata,
)
@classmethod
def from_bearer_token(
cls,
oauth2_bearer_token: str,
twitter_users: Sequence[str],
number_tweets: Optional[int] = 100,
) -> TwitterTweetLoader:
"""Create a TwitterTweetLoader from OAuth2 bearer token."""
tweepy = _dependable_tweepy_import()
auth = tweepy.OAuth2BearerHandler(oauth2_bearer_token)
return cls(
auth_handler=auth,
twitter_users=twitter_users,
number_tweets=number_tweets,
)
@classmethod
def from_secrets(
cls,
access_token: str,
access_token_secret: str,
consumer_key: str,
consumer_secret: str,
twitter_users: Sequence[str],
number_tweets: Optional[int] = 100,
) -> TwitterTweetLoader:
"""Create a TwitterTweetLoader from access tokens and secrets."""
tweepy = _dependable_tweepy_import()
auth = tweepy.OAuthHandler(
access_token=access_token,
access_token_secret=access_token_secret,
consumer_key=consumer_key,
consumer_secret=consumer_secret,
)
return cls(
auth_handler=auth,
twitter_users=twitter_users,
number_tweets=number_tweets,
)
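# Usage sketch (illustrative, not part of the original module): the bearer token is a
# placeholder; a Twitter developer account with v2 API access is assumed.
if __name__ == "__main__":
    loader = TwitterTweetLoader.from_bearer_token(
        oauth2_bearer_token="TWITTER-BEARER-TOKEN-PLACEHOLDER",
        twitter_users=["elonmusk"],
        number_tweets=10,
    )
    for doc in loader.load():
        print(doc.metadata["created_at"], doc.page_content[:80])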
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~vectorstores~annoy.py | from __future__ import annotations
import os
import pickle
import uuid
from configparser import ConfigParser
from pathlib import Path
from typing import Any, Callable, Dict, Iterable, List, Optional, Tuple
import numpy as np
from langchain.docstore.base import Docstore
from langchain.docstore.document import Document
from langchain.docstore.in_memory import InMemoryDocstore
from langchain.schema.embeddings import Embeddings
from langchain.schema.vectorstore import VectorStore
from langchain.vectorstores.utils import maximal_marginal_relevance
INDEX_METRICS = frozenset(["angular", "euclidean", "manhattan", "hamming", "dot"])
DEFAULT_METRIC = "angular"
def dependable_annoy_import() -> Any:
"""Import annoy if available, otherwise raise error."""
try:
import annoy
except ImportError:
raise ImportError(
"Could not import annoy python package. "
"Please install it with `pip install --user annoy` "
)
return annoy
class Annoy(VectorStore):
"""`Annoy` vector store.
To use, you should have the ``annoy`` python package installed.
Example:
.. code-block:: python
from langchain.vectorstores import Annoy
db = Annoy(embedding_function, index, docstore, index_to_docstore_id)
"""
def __init__(
self,
embedding_function: Callable,
index: Any,
metric: str,
docstore: Docstore,
index_to_docstore_id: Dict[int, str],
):
"""Initialize with necessary components."""
self.embedding_function = embedding_function
self.index = index
self.metric = metric
self.docstore = docstore
self.index_to_docstore_id = index_to_docstore_id
@property
def embeddings(self) -> Optional[Embeddings]:
# TODO: Accept embedding object directly
return None
def add_texts(
self,
texts: Iterable[str],
metadatas: Optional[List[dict]] = None,
**kwargs: Any,
) -> List[str]:
raise NotImplementedError(
"Annoy does not allow to add new data once the index is build."
)
def process_index_results(
self, idxs: List[int], dists: List[float]
) -> List[Tuple[Document, float]]:
"""Turns annoy results into a list of documents and scores.
Args:
idxs: List of indices of the documents in the index.
dists: List of distances of the documents in the index.
Returns:
List of Documents and scores.
"""
docs = []
for idx, dist in zip(idxs, dists):
_id = self.index_to_docstore_id[idx]
doc = self.docstore.search(_id)
if not isinstance(doc, Document):
raise ValueError(f"Could not find document for id {_id}, got {doc}")
docs.append((doc, dist))
return docs
def similarity_search_with_score_by_vector(
self, embedding: List[float], k: int = 4, search_k: int = -1
) -> List[Tuple[Document, float]]:
"""Return docs most similar to query.
Args:
            embedding: Embedding to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
search_k: inspect up to search_k nodes which defaults
to n_trees * n if not provided
Returns:
List of Documents most similar to the query and score for each
"""
idxs, dists = self.index.get_nns_by_vector(
embedding, k, search_k=search_k, include_distances=True
)
return self.process_index_results(idxs, dists)
def similarity_search_with_score_by_index(
self, docstore_index: int, k: int = 4, search_k: int = -1
) -> List[Tuple[Document, float]]:
"""Return docs most similar to query.
Args:
            docstore_index: Index of the document in the docstore to use as the query.
k: Number of Documents to return. Defaults to 4.
search_k: inspect up to search_k nodes which defaults
to n_trees * n if not provided
Returns:
List of Documents most similar to the query and score for each
"""
idxs, dists = self.index.get_nns_by_item(
docstore_index, k, search_k=search_k, include_distances=True
)
return self.process_index_results(idxs, dists)
def similarity_search_with_score(
self, query: str, k: int = 4, search_k: int = -1
) -> List[Tuple[Document, float]]:
"""Return docs most similar to query.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
search_k: inspect up to search_k nodes which defaults
to n_trees * n if not provided
Returns:
List of Documents most similar to the query and score for each
"""
embedding = self.embedding_function(query)
docs = self.similarity_search_with_score_by_vector(embedding, k, search_k)
return docs
def similarity_search_by_vector(
self, embedding: List[float], k: int = 4, search_k: int = -1, **kwargs: Any
) -> List[Document]:
"""Return docs most similar to embedding vector.
Args:
embedding: Embedding to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
search_k: inspect up to search_k nodes which defaults
to n_trees * n if not provided
Returns:
List of Documents most similar to the embedding.
"""
docs_and_scores = self.similarity_search_with_score_by_vector(
embedding, k, search_k
)
return [doc for doc, _ in docs_and_scores]
def similarity_search_by_index(
self, docstore_index: int, k: int = 4, search_k: int = -1, **kwargs: Any
) -> List[Document]:
"""Return docs most similar to docstore_index.
Args:
docstore_index: Index of document in docstore
k: Number of Documents to return. Defaults to 4.
search_k: inspect up to search_k nodes which defaults
to n_trees * n if not provided
Returns:
List of Documents most similar to the embedding.
"""
docs_and_scores = self.similarity_search_with_score_by_index(
docstore_index, k, search_k
)
return [doc for doc, _ in docs_and_scores]
def similarity_search(
self, query: str, k: int = 4, search_k: int = -1, **kwargs: Any
) -> List[Document]:
"""Return docs most similar to query.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
search_k: inspect up to search_k nodes which defaults
to n_trees * n if not provided
Returns:
List of Documents most similar to the query.
"""
docs_and_scores = self.similarity_search_with_score(query, k, search_k)
return [doc for doc, _ in docs_and_scores]
def max_marginal_relevance_search_by_vector(
self,
embedding: List[float],
k: int = 4,
fetch_k: int = 20,
lambda_mult: float = 0.5,
**kwargs: Any,
) -> List[Document]:
"""Return docs selected using the maximal marginal relevance.
Maximal marginal relevance optimizes for similarity to query AND diversity
among selected documents.
Args:
embedding: Embedding to look up documents similar to.
fetch_k: Number of Documents to fetch to pass to MMR algorithm.
k: Number of Documents to return. Defaults to 4.
lambda_mult: Number between 0 and 1 that determines the degree
of diversity among the results with 0 corresponding
to maximum diversity and 1 to minimum diversity.
Defaults to 0.5.
Returns:
List of Documents selected by maximal marginal relevance.
"""
idxs = self.index.get_nns_by_vector(
embedding, fetch_k, search_k=-1, include_distances=False
)
embeddings = [self.index.get_item_vector(i) for i in idxs]
mmr_selected = maximal_marginal_relevance(
np.array([embedding], dtype=np.float32),
embeddings,
k=k,
lambda_mult=lambda_mult,
)
# ignore the -1's if not enough docs are returned/indexed
selected_indices = [idxs[i] for i in mmr_selected if i != -1]
docs = []
for i in selected_indices:
_id = self.index_to_docstore_id[i]
doc = self.docstore.search(_id)
if not isinstance(doc, Document):
raise ValueError(f"Could not find document for id {_id}, got {doc}")
docs.append(doc)
return docs
def max_marginal_relevance_search(
self,
query: str,
k: int = 4,
fetch_k: int = 20,
lambda_mult: float = 0.5,
**kwargs: Any,
) -> List[Document]:
"""Return docs selected using the maximal marginal relevance.
Maximal marginal relevance optimizes for similarity to query AND diversity
among selected documents.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
fetch_k: Number of Documents to fetch to pass to MMR algorithm.
lambda_mult: Number between 0 and 1 that determines the degree
of diversity among the results with 0 corresponding
to maximum diversity and 1 to minimum diversity.
Defaults to 0.5.
Returns:
List of Documents selected by maximal marginal relevance.
"""
embedding = self.embedding_function(query)
docs = self.max_marginal_relevance_search_by_vector(
embedding, k, fetch_k, lambda_mult=lambda_mult
)
return docs
@classmethod
def __from(
cls,
texts: List[str],
embeddings: List[List[float]],
embedding: Embeddings,
metadatas: Optional[List[dict]] = None,
metric: str = DEFAULT_METRIC,
trees: int = 100,
n_jobs: int = -1,
**kwargs: Any,
) -> Annoy:
if metric not in INDEX_METRICS:
raise ValueError(
(
f"Unsupported distance metric: {metric}. "
f"Expected one of {list(INDEX_METRICS)}"
)
)
annoy = dependable_annoy_import()
if not embeddings:
raise ValueError("embeddings must be provided to build AnnoyIndex")
f = len(embeddings[0])
index = annoy.AnnoyIndex(f, metric=metric)
for i, emb in enumerate(embeddings):
index.add_item(i, emb)
index.build(trees, n_jobs=n_jobs)
documents = []
for i, text in enumerate(texts):
metadata = metadatas[i] if metadatas else {}
documents.append(Document(page_content=text, metadata=metadata))
index_to_id = {i: str(uuid.uuid4()) for i in range(len(documents))}
docstore = InMemoryDocstore(
{index_to_id[i]: doc for i, doc in enumerate(documents)}
)
return cls(embedding.embed_query, index, metric, docstore, index_to_id)
@classmethod
def from_texts(
cls,
texts: List[str],
embedding: Embeddings,
metadatas: Optional[List[dict]] = None,
metric: str = DEFAULT_METRIC,
trees: int = 100,
n_jobs: int = -1,
**kwargs: Any,
) -> Annoy:
"""Construct Annoy wrapper from raw documents.
Args:
texts: List of documents to index.
embedding: Embedding function to use.
metadatas: List of metadata dictionaries to associate with documents.
metric: Metric to use for indexing. Defaults to "angular".
trees: Number of trees to use for indexing. Defaults to 100.
n_jobs: Number of jobs to use for indexing. Defaults to -1.
This is a user friendly interface that:
1. Embeds documents.
2. Creates an in memory docstore
3. Initializes the Annoy database
This is intended to be a quick way to get started.
Example:
.. code-block:: python
from langchain.vectorstores import Annoy
from langchain.embeddings import OpenAIEmbeddings
embeddings = OpenAIEmbeddings()
index = Annoy.from_texts(texts, embeddings)
"""
embeddings = embedding.embed_documents(texts)
return cls.__from(
texts, embeddings, embedding, metadatas, metric, trees, n_jobs, **kwargs
)
@classmethod
def from_embeddings(
cls,
text_embeddings: List[Tuple[str, List[float]]],
embedding: Embeddings,
metadatas: Optional[List[dict]] = None,
metric: str = DEFAULT_METRIC,
trees: int = 100,
n_jobs: int = -1,
**kwargs: Any,
) -> Annoy:
"""Construct Annoy wrapper from embeddings.
Args:
text_embeddings: List of tuples of (text, embedding)
embedding: Embedding function to use.
metadatas: List of metadata dictionaries to associate with documents.
metric: Metric to use for indexing. Defaults to "angular".
trees: Number of trees to use for indexing. Defaults to 100.
n_jobs: Number of jobs to use for indexing. Defaults to -1
This is a user friendly interface that:
1. Creates an in memory docstore with provided embeddings
2. Initializes the Annoy database
This is intended to be a quick way to get started.
Example:
.. code-block:: python
from langchain.vectorstores import Annoy
from langchain.embeddings import OpenAIEmbeddings
embeddings = OpenAIEmbeddings()
text_embeddings = embeddings.embed_documents(texts)
text_embedding_pairs = list(zip(texts, text_embeddings))
db = Annoy.from_embeddings(text_embedding_pairs, embeddings)
"""
texts = [t[0] for t in text_embeddings]
embeddings = [t[1] for t in text_embeddings]
return cls.__from(
texts, embeddings, embedding, metadatas, metric, trees, n_jobs, **kwargs
)
def save_local(self, folder_path: str, prefault: bool = False) -> None:
"""Save Annoy index, docstore, and index_to_docstore_id to disk.
Args:
folder_path: folder path to save index, docstore,
and index_to_docstore_id to.
prefault: Whether to pre-load the index into memory.
"""
path = Path(folder_path)
os.makedirs(path, exist_ok=True)
# save index, index config, docstore and index_to_docstore_id
config_object = ConfigParser()
config_object["ANNOY"] = {
"f": self.index.f,
"metric": self.metric,
}
self.index.save(str(path / "index.annoy"), prefault=prefault)
with open(path / "index.pkl", "wb") as file:
pickle.dump((self.docstore, self.index_to_docstore_id, config_object), file)
@classmethod
def load_local(
cls,
folder_path: str,
embeddings: Embeddings,
) -> Annoy:
"""Load Annoy index, docstore, and index_to_docstore_id to disk.
Args:
folder_path: folder path to load index, docstore,
and index_to_docstore_id from.
embeddings: Embeddings to use when generating queries.
"""
path = Path(folder_path)
# load index separately since it is not picklable
annoy = dependable_annoy_import()
# load docstore and index_to_docstore_id
with open(path / "index.pkl", "rb") as file:
docstore, index_to_docstore_id, config_object = pickle.load(file)
f = int(config_object["ANNOY"]["f"])
metric = config_object["ANNOY"]["metric"]
index = annoy.AnnoyIndex(f, metric=metric)
index.load(str(path / "index.annoy"))
return cls(
embeddings.embed_query, index, metric, docstore, index_to_docstore_id
)
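# Usage sketch (illustrative, not part of the original module): building, saving and
# reloading a small index. HuggingFaceEmbeddings is only an example backend (it needs
# the ``sentence-transformers`` package); any Embeddings implementation works, and the
# texts are placeholders.
if __name__ == "__main__":
    from langchain.embeddings import HuggingFaceEmbeddings
    embeddings = HuggingFaceEmbeddings()
    texts = ["pizza is great", "I love salad", "my car broke down", "dogs are loyal"]
    db = Annoy.from_texts(texts, embeddings, metric="angular", trees=10)
    print(db.similarity_search_with_score("food", k=2))
    db.save_local("annoy_index")
    reloaded = Annoy.load_local("annoy_index", embeddings)
    print(reloaded.similarity_search_by_index(0, k=2))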
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~tests~unit_tests~document_loaders~test_web_base.py | import pytest as pytest
from langchain.document_loaders.web_base import WebBaseLoader
class TestWebBaseLoader:
@pytest.mark.requires("bs4")
def test_respect_user_specified_user_agent(self) -> None:
user_specified_user_agent = "user_specified_user_agent"
header_template = {"User-Agent": user_specified_user_agent}
url = "https://www.example.com"
loader = WebBaseLoader(url, header_template=header_template)
assert loader.session.headers["User-Agent"] == user_specified_user_agent
def test_web_path_parameter(self) -> None:
web_base_loader = WebBaseLoader(web_paths=["https://www.example.com"])
assert web_base_loader.web_paths == ["https://www.example.com"]
web_base_loader = WebBaseLoader(web_path=["https://www.example.com"])
assert web_base_loader.web_paths == ["https://www.example.com"]
web_base_loader = WebBaseLoader(web_path="https://www.example.com")
assert web_base_loader.web_paths == ["https://www.example.com"]
| [
"{'User-Agent': 'user_specified_user_agent'}"
] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~retrievers~zep.py | from __future__ import annotations
from enum import Enum
from typing import TYPE_CHECKING, Any, Dict, List, Optional
from langchain.callbacks.manager import (
AsyncCallbackManagerForRetrieverRun,
CallbackManagerForRetrieverRun,
)
from langchain.pydantic_v1 import root_validator
from langchain.schema import BaseRetriever, Document
if TYPE_CHECKING:
from zep_python import MemorySearchResult
class SearchType(str, Enum):
"""Enumerator of the types of search to perform."""
similarity = "similarity"
"""Similarity search."""
mmr = "mmr"
"""Maximal Marginal Relevance reranking of similarity search."""
class ZepRetriever(BaseRetriever):
"""`Zep` MemoryStore Retriever.
Search your user's long-term chat history with Zep.
Zep offers both simple semantic search and Maximal Marginal Relevance (MMR)
reranking of search results.
Note: You will need to provide the user's `session_id` to use this retriever.
Args:
url: URL of your Zep server (required)
api_key: Your Zep API key (optional)
session_id: Identifies your user or a user's session (required)
top_k: Number of documents to return (default: 3, optional)
search_type: Type of search to perform (similarity / mmr) (default: similarity,
optional)
mmr_lambda: Lambda value for MMR search. Defaults to 0.5 (optional)
Zep - Fast, scalable building blocks for LLM Apps
=========
Zep is an open source platform for productionizing LLM apps. Go from a prototype
built in LangChain or LlamaIndex, or a custom app, to production in minutes without
rewriting code.
For server installation instructions, see:
https://docs.getzep.com/deployment/quickstart/
"""
zep_client: Optional[Any] = None
"""Zep client."""
url: str
"""URL of your Zep server."""
api_key: Optional[str] = None
"""Your Zep API key."""
session_id: str
"""Zep session ID."""
top_k: Optional[int]
"""Number of items to return."""
search_type: SearchType = SearchType.similarity
"""Type of search to perform (similarity / mmr)"""
mmr_lambda: Optional[float] = None
"""Lambda value for MMR search."""
@root_validator(pre=True)
def create_client(cls, values: dict) -> dict:
try:
from zep_python import ZepClient
except ImportError:
raise ImportError(
"Could not import zep-python package. "
"Please install it with `pip install zep-python`."
)
values["zep_client"] = values.get(
"zep_client",
ZepClient(base_url=values["url"], api_key=values.get("api_key")),
)
return values
def _search_result_to_doc(
self, results: List[MemorySearchResult]
) -> List[Document]:
return [
Document(
page_content=r.message.pop("content"),
metadata={"score": r.dist, **r.message},
)
for r in results
if r.message
]
def _get_relevant_documents(
self,
query: str,
*,
run_manager: CallbackManagerForRetrieverRun,
metadata: Optional[Dict[str, Any]] = None,
) -> List[Document]:
from zep_python import MemorySearchPayload
if not self.zep_client:
raise RuntimeError("Zep client not initialized.")
payload: MemorySearchPayload = MemorySearchPayload(
text=query,
metadata=metadata,
search_type=self.search_type,
mmr_lambda=self.mmr_lambda,
)
results: List[MemorySearchResult] = self.zep_client.memory.search_memory(
self.session_id, payload, limit=self.top_k
)
return self._search_result_to_doc(results)
async def _aget_relevant_documents(
self,
query: str,
*,
run_manager: AsyncCallbackManagerForRetrieverRun,
metadata: Optional[Dict[str, Any]] = None,
) -> List[Document]:
from zep_python import MemorySearchPayload
if not self.zep_client:
raise RuntimeError("Zep client not initialized.")
payload: MemorySearchPayload = MemorySearchPayload(
text=query,
metadata=metadata,
search_type=self.search_type,
mmr_lambda=self.mmr_lambda,
)
results: List[MemorySearchResult] = await self.zep_client.memory.asearch_memory(
self.session_id, payload, limit=self.top_k
)
return self._search_result_to_doc(results)
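# Usage sketch (illustrative, not part of the original module): assumes a Zep server is
# reachable at the URL below and that the session already contains chat history; the
# session id is a placeholder.
if __name__ == "__main__":
    retriever = ZepRetriever(
        url="http://localhost:8000",
        session_id="my-user-session",
        top_k=5,
        search_type=SearchType.mmr,
        mmr_lambda=0.5,
    )
    for doc in retriever.get_relevant_documents("travel plans"):
        print(doc.metadata["score"], doc.page_content)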
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~embeddings~llamacpp.py | from typing import Any, Dict, List, Optional
from langchain.pydantic_v1 import BaseModel, Extra, Field, root_validator
from langchain.schema.embeddings import Embeddings
class LlamaCppEmbeddings(BaseModel, Embeddings):
"""llama.cpp embedding models.
To use, you should have the llama-cpp-python library installed, and provide the
path to the Llama model as a named parameter to the constructor.
Check out: https://github.com/abetlen/llama-cpp-python
Example:
.. code-block:: python
from langchain.embeddings import LlamaCppEmbeddings
llama = LlamaCppEmbeddings(model_path="/path/to/model.bin")
"""
client: Any #: :meta private:
model_path: str
n_ctx: int = Field(512, alias="n_ctx")
"""Token context window."""
n_parts: int = Field(-1, alias="n_parts")
"""Number of parts to split the model into.
If -1, the number of parts is automatically determined."""
seed: int = Field(-1, alias="seed")
"""Seed. If -1, a random seed is used."""
f16_kv: bool = Field(False, alias="f16_kv")
"""Use half-precision for key/value cache."""
logits_all: bool = Field(False, alias="logits_all")
"""Return logits for all tokens, not just the last token."""
vocab_only: bool = Field(False, alias="vocab_only")
"""Only load the vocabulary, no weights."""
use_mlock: bool = Field(False, alias="use_mlock")
"""Force system to keep model in RAM."""
n_threads: Optional[int] = Field(None, alias="n_threads")
"""Number of threads to use. If None, the number
of threads is automatically determined."""
n_batch: Optional[int] = Field(8, alias="n_batch")
"""Number of tokens to process in parallel.
Should be a number between 1 and n_ctx."""
n_gpu_layers: Optional[int] = Field(None, alias="n_gpu_layers")
"""Number of layers to be loaded into gpu memory. Default None."""
verbose: bool = Field(True, alias="verbose")
"""Print verbose output to stderr."""
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that llama-cpp-python library is installed."""
model_path = values["model_path"]
model_param_names = [
"n_ctx",
"n_parts",
"seed",
"f16_kv",
"logits_all",
"vocab_only",
"use_mlock",
"n_threads",
"n_batch",
"verbose",
]
model_params = {k: values[k] for k in model_param_names}
# For backwards compatibility, only include if non-null.
if values["n_gpu_layers"] is not None:
model_params["n_gpu_layers"] = values["n_gpu_layers"]
try:
from llama_cpp import Llama
values["client"] = Llama(model_path, embedding=True, **model_params)
except ImportError:
raise ModuleNotFoundError(
"Could not import llama-cpp-python library. "
"Please install the llama-cpp-python library to "
"use this embedding model: pip install llama-cpp-python"
)
except Exception as e:
raise ValueError(
f"Could not load Llama model from path: {model_path}. "
f"Received error {e}"
)
return values
def embed_documents(self, texts: List[str]) -> List[List[float]]:
"""Embed a list of documents using the Llama model.
Args:
texts: The list of texts to embed.
Returns:
List of embeddings, one for each text.
"""
embeddings = [self.client.embed(text) for text in texts]
return [list(map(float, e)) for e in embeddings]
def embed_query(self, text: str) -> List[float]:
"""Embed a query using the Llama model.
Args:
text: The text to embed.
Returns:
Embeddings for the text.
"""
embedding = self.client.embed(text)
return list(map(float, embedding))
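# Usage sketch (illustrative, not part of the original module): the model path is a
# placeholder for a local Llama model file compatible with llama-cpp-python.
if __name__ == "__main__":
    llama = LlamaCppEmbeddings(model_path="/path/to/model.bin", n_ctx=512)
    query_vector = llama.embed_query("This is a test sentence.")
    print(len(query_vector))
    doc_vectors = llama.embed_documents(["first document", "second document"])
    print(len(doc_vectors), len(doc_vectors[0]))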
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~document_loaders~parsers~html~bs4.py | """Loader that uses bs4 to load HTML files, enriching metadata with page title."""
import logging
from typing import Any, Dict, Iterator, Union
from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseBlobParser
from langchain.document_loaders.blob_loaders import Blob
logger = logging.getLogger(__name__)
class BS4HTMLParser(BaseBlobParser):
"""Pparse HTML files using `Beautiful Soup`."""
def __init__(
self,
*,
features: str = "lxml",
get_text_separator: str = "",
**kwargs: Any,
) -> None:
"""Initialize a bs4 based HTML parser."""
try:
import bs4 # noqa:F401
except ImportError:
raise ImportError(
"beautifulsoup4 package not found, please install it with "
"`pip install beautifulsoup4`"
)
self.bs_kwargs = {"features": features, **kwargs}
self.get_text_separator = get_text_separator
def lazy_parse(self, blob: Blob) -> Iterator[Document]:
"""Load HTML document into document objects."""
from bs4 import BeautifulSoup
with blob.as_bytes_io() as f:
soup = BeautifulSoup(f, **self.bs_kwargs)
text = soup.get_text(self.get_text_separator)
if soup.title:
title = str(soup.title.string)
else:
title = ""
metadata: Dict[str, Union[str, None]] = {
"source": blob.source,
"title": title,
}
yield Document(page_content=text, metadata=metadata)
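# Usage sketch (illustrative, not part of the original module): parsing a small HTML
# file written to disk; the default "lxml" feature requires the ``lxml`` package.
if __name__ == "__main__":
    from pathlib import Path
    html = b"<html><head><title>Demo</title></head><body><p>Hello world</p></body></html>"
    Path("demo.html").write_bytes(html)
    parser = BS4HTMLParser(get_text_separator=" ")
    for doc in parser.lazy_parse(Blob.from_path("demo.html")):
        print(doc.metadata["title"], "->", doc.page_content.strip())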
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~chains~qa_with_sources~stuff_prompt.py | # flake8: noqa
from langchain.prompts import PromptTemplate
template = """Учитывая следующие выдержки из длинного документа и вопрос, создай окончательный ответ с ссылками ("SOURCES").
Если ты не знаешь ответа, просто скажи, что не знаешь. Не пытайся выдумывать ответ.
ВСЕГДА возвращай часть "SOURCES" в своем ответе.
ВОПРОС: Какое государство/страна управляет толкованием контракта?
=========
Содержание: Это Соглашение регулируется английским законодательством, и стороны подчиняются исключительной юрисдикции английских судов в отношении любого спора (контрактного или внеконтрактного) по данному Соглашению, за исключением того, что любая сторона может обратиться в любой суд за получением судебного запрета или иного средства защиты своих прав интеллектуальной собственности.
Источник: 28-pl
Содержание: Нет Отказа. Несоблюдение или задержка в осуществлении любого права или средства правовой защиты по данному Соглашению не является отказом от такого (или любого другого) права или средства правовой защиты.\n\n11.7 Разделимость. Недействительность, незаконность или неосуществимость любого условия (или части условия) данного Соглашения не влияет на продолжение в силе остатка условия (если таковой имеется) и данного Соглашения.\n\n11.8 Нет Агентства. За исключением как это прямо указано иначе, ничто в данном Соглашении не создает агентство, партнерство или совместное предприятие любого рода между сторонами.\n\n11.9 Нет Третьих Лиц-Бенефициаров.
Источник: 30-pl
Содержание: (b) если Google верит, в доброй вере, что Дистрибьютор нарушил или заставил Google нарушить любые Законы против Взяточничества (как определено в Пункте 8.5) или что такое нарушение вполне вероятно,
Источник: 4-pl
=========
FINAL ANSWER: Это Соглашение регулируется английским законодательством.
SOURCES: 28-pl
ВОПРОС: {question}
=========
{summaries}
=========
FINAL ANSWER:"""
PROMPT = PromptTemplate(template=template, input_variables=["summaries", "question"])
EXAMPLE_PROMPT = PromptTemplate(
template="Содержание: {page_content}\nИсточник: {source}",
input_variables=["page_content", "source"],
)
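# Usage sketch (illustrative, not part of the original module): formatting one retrieved
# chunk with EXAMPLE_PROMPT and stuffing it into PROMPT; the content and source id are
# taken from the worked example inside the template above.
if __name__ == "__main__":
    summary = EXAMPLE_PROMPT.format(
        page_content="Это Соглашение регулируется английским законодательством.",
        source="28-pl",
    )
    print(
        PROMPT.format(
            summaries=summary,
            question="Какое государство/страна управляет толкованием контракта?",
        )
    )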
| [
"Учитывая следующие выдержки из длинного документа и вопрос, создай окончательный ответ с ссылками (\"SOURCES\"). \nЕсли ты не знаешь ответа, просто скажи, что не знаешь. Не пытайся выдумывать ответ.\nВСЕГДА возвращай часть \"SOURCES\" в своем ответе.\n\nВОПРОС: Какое государство/страна управляет толкованием контракта?\n=========\nСодержание: Это Соглашение регулируется английским законодательством, и стороны подчиняются исключительной юрисдикции английских судов в отношении любого спора (контрактного или внеконтрактного) по данному Соглашению, за исключением того, что любая сторона может обратиться в любой суд за получением судебного запрета или иного средства защиты своих прав интеллектуальной собственности.\nИсточник: 28-pl\nСодержание: Нет Отказа. Несоблюдение или задержка в осуществлении любого права или средства правовой защиты по данному Соглашению не является отказом от такого (или любого другого) права или средства правовой защиты.\n\n11.7 Разделимость. Недействительность, незаконность или неосуществимость любого условия (или части условия) данного Соглашения не влияет на продолжение в силе остатка условия (если таковой имеется) и данного Соглашения.\n\n11.8 Нет Агентства. За исключением как это прямо указано иначе, ничто в данном Соглашении не создает агентство, партнерство или совместное предприятие любого рода между сторонами.\n\n11.9 Нет Третьих Лиц-Бенефициаров.\nИсточник: 30-pl\nСодержание: (b) если Google верит, в доброй вере, что Дистрибьютор нарушил или заставил Google нарушить любые Законы против Взяточничества (как определено в Пункте 8.5) или что такое нарушение вполне вероятно,\nИсточник: 4-pl\n=========\nFINAL ANSWER: Это Соглашение регулируется английским законодательством.\nSOURCES: 28-pl\n\nВОПРОС: {question}\n=========\n{summaries}\n=========\nFINAL ANSWER:",
"question",
"page_content",
"Содержание: {page_content}\nИсточник: {source}"
] |
2024-01-10 | ai-forever/gigachain | libs~langchain~tests~integration_tests~memory~chat_message_histories~test_zep.py | from typing import TYPE_CHECKING
import pytest
from pytest_mock import MockerFixture
from langchain.memory.chat_message_histories import ZepChatMessageHistory
from langchain.schema.messages import AIMessage, HumanMessage, SystemMessage
if TYPE_CHECKING:
from zep_python import ZepClient
@pytest.fixture
@pytest.mark.requires("zep_python")
def zep_chat(mocker: MockerFixture) -> ZepChatMessageHistory:
mock_zep_client: ZepClient = mocker.patch("zep_python.ZepClient", autospec=True)
mock_zep_client.memory = mocker.patch(
"zep_python.memory.client.MemoryClient", autospec=True
)
zep_chat: ZepChatMessageHistory = ZepChatMessageHistory(
"test_session", "http://localhost:8000"
)
zep_chat.zep_client = mock_zep_client
return zep_chat
@pytest.mark.requires("zep_python")
def test_messages(mocker: MockerFixture, zep_chat: ZepChatMessageHistory) -> None:
from zep_python import Memory, Message, Summary
mock_memory: Memory = Memory(
summary=Summary(
content="summary",
),
messages=[
Message(content="message", role="ai", metadata={"key": "value"}),
Message(content="message2", role="human", metadata={"key2": "value2"}),
],
)
zep_chat.zep_client.memory.get_memory.return_value = mock_memory # type: ignore
result = zep_chat.messages
assert len(result) == 3
assert isinstance(result[0], SystemMessage) # summary
assert isinstance(result[1], AIMessage)
assert isinstance(result[2], HumanMessage)
@pytest.mark.requires("zep_python")
def test_add_user_message(
mocker: MockerFixture, zep_chat: ZepChatMessageHistory
) -> None:
zep_chat.add_user_message("test message")
zep_chat.zep_client.memory.add_memory.assert_called_once() # type: ignore
@pytest.mark.requires("zep_python")
def test_add_ai_message(mocker: MockerFixture, zep_chat: ZepChatMessageHistory) -> None:
zep_chat.add_ai_message("test message")
zep_chat.zep_client.memory.add_memory.assert_called_once() # type: ignore
@pytest.mark.requires("zep_python")
def test_append(mocker: MockerFixture, zep_chat: ZepChatMessageHistory) -> None:
zep_chat.add_message(AIMessage(content="test message"))
zep_chat.zep_client.memory.add_memory.assert_called_once() # type: ignore
@pytest.mark.requires("zep_python")
def test_search(mocker: MockerFixture, zep_chat: ZepChatMessageHistory) -> None:
zep_chat.search("test query")
zep_chat.zep_client.memory.search_memory.assert_called_once_with( # type: ignore
"test_session", mocker.ANY, limit=None
)
@pytest.mark.requires("zep_python")
def test_clear(mocker: MockerFixture, zep_chat: ZepChatMessageHistory) -> None:
zep_chat.clear()
zep_chat.zep_client.memory.delete_memory.assert_called_once_with( # type: ignore
"test_session"
)
| [
"message2",
"message",
"test message"
] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~output_parsers~regex_dict.py | from __future__ import annotations
import re
from typing import Dict, Optional
from langchain.schema import BaseOutputParser
class RegexDictParser(BaseOutputParser):
"""Parse the output of an LLM call into a Dictionary using a regex."""
regex_pattern: str = r"{}:\s?([^.'\n']*)\.?" # : :meta private:
"""The regex pattern to use to parse the output."""
output_key_to_format: Dict[str, str]
"""The keys to use for the output."""
no_update_value: Optional[str] = None
"""The default key to use for the output."""
@property
def _type(self) -> str:
"""Return the type key."""
return "regex_dict_parser"
def parse(self, text: str) -> Dict[str, str]:
"""Parse the output of an LLM call."""
result = {}
for output_key, expected_format in self.output_key_to_format.items():
specific_regex = self.regex_pattern.format(re.escape(expected_format))
matches = re.findall(specific_regex, text)
if not matches:
raise ValueError(
f"No match found for output key: {output_key} with expected format \
{expected_format} on text {text}"
)
elif len(matches) > 1:
raise ValueError(
f"Multiple matches found for output key: {output_key} with \
expected format {expected_format} on text {text}"
)
elif (
self.no_update_value is not None and matches[0] == self.no_update_value
):
continue
else:
result[output_key] = matches[0]
return result
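# Usage sketch (illustrative, not part of the original module): the output keys, labels
# and LLM text below are made-up placeholders.
if __name__ == "__main__":
    parser = RegexDictParser(
        output_key_to_format={"action": "Action", "action_input": "Action Input"},
        no_update_value="N/A",
    )
    llm_output = "Action: search. Action Input: capital of France."
    print(parser.parse(llm_output))
    # expected: {'action': 'search', 'action_input': 'capital of France'}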
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~document_loaders~parsers~audio.py | import logging
import time
from typing import Dict, Iterator, Optional, Tuple
from langchain.document_loaders.base import BaseBlobParser
from langchain.document_loaders.blob_loaders import Blob
from langchain.schema import Document
logger = logging.getLogger(__name__)
class OpenAIWhisperParser(BaseBlobParser):
"""Transcribe and parse audio files.
    Audio transcription is done with the OpenAI Whisper model."""
def __init__(self, api_key: Optional[str] = None):
self.api_key = api_key
def lazy_parse(self, blob: Blob) -> Iterator[Document]:
"""Lazily parse the blob."""
import io
try:
import openai
except ImportError:
raise ImportError(
"openai package not found, please install it with "
"`pip install openai`"
)
try:
from pydub import AudioSegment
except ImportError:
raise ImportError(
"pydub package not found, please install it with " "`pip install pydub`"
)
# Set the API key if provided
if self.api_key:
openai.api_key = self.api_key
# Audio file from disk
audio = AudioSegment.from_file(blob.path)
# Define the duration of each chunk in minutes
# Need to meet 25MB size limit for Whisper API
chunk_duration = 20
chunk_duration_ms = chunk_duration * 60 * 1000
# Split the audio into chunk_duration_ms chunks
for split_number, i in enumerate(range(0, len(audio), chunk_duration_ms)):
# Audio chunk
chunk = audio[i : i + chunk_duration_ms]
file_obj = io.BytesIO(chunk.export(format="mp3").read())
if blob.source is not None:
file_obj.name = blob.source + f"_part_{split_number}.mp3"
else:
file_obj.name = f"part_{split_number}.mp3"
# Transcribe
print(f"Transcribing part {split_number+1}!")
attempts = 0
while attempts < 3:
try:
transcript = openai.Audio.transcribe("whisper-1", file_obj)
break
except Exception as e:
attempts += 1
print(f"Attempt {attempts} failed. Exception: {str(e)}")
time.sleep(5)
else:
print("Failed to transcribe after 3 attempts.")
continue
yield Document(
page_content=transcript.text,
metadata={"source": blob.source, "chunk": split_number},
)
class OpenAIWhisperParserLocal(BaseBlobParser):
"""Transcribe and parse audio files with OpenAI Whisper model.
Audio transcription with OpenAI Whisper model locally from transformers.
Parameters:
device - device to use
NOTE: By default uses the gpu if available,
if you want to use cpu, please set device = "cpu"
lang_model - whisper model to use, for example "openai/whisper-medium"
forced_decoder_ids - id states for decoder in multilanguage model,
usage example:
from transformers import WhisperProcessor
processor = WhisperProcessor.from_pretrained("openai/whisper-medium")
        forced_decoder_ids = processor.get_decoder_prompt_ids(language="french",
        task="transcribe")
        forced_decoder_ids = processor.get_decoder_prompt_ids(language="french",
        task="translate")
"""
def __init__(
self,
device: str = "0",
lang_model: Optional[str] = None,
forced_decoder_ids: Optional[Tuple[Dict]] = None,
):
"""Initialize the parser.
Args:
device: device to use.
lang_model: whisper model to use, for example "openai/whisper-medium".
Defaults to None.
forced_decoder_ids: id states for decoder in a multilanguage model.
Defaults to None.
"""
try:
from transformers import pipeline
except ImportError:
raise ImportError(
"transformers package not found, please install it with "
"`pip install transformers`"
)
try:
import torch
except ImportError:
raise ImportError(
"torch package not found, please install it with " "`pip install torch`"
)
        # set device: cpu by default; otherwise check if a GPU is available
if device == "cpu":
self.device = "cpu"
if lang_model is not None:
self.lang_model = lang_model
print("WARNING! Model override. Using model: ", self.lang_model)
else:
# unless overridden, use the small base model on cpu
self.lang_model = "openai/whisper-base"
else:
if torch.cuda.is_available():
self.device = "cuda:0"
# check GPU memory and select automatically the model
mem = torch.cuda.get_device_properties(self.device).total_memory / (
1024**2
)
if mem < 5000:
rec_model = "openai/whisper-base"
elif mem < 7000:
rec_model = "openai/whisper-small"
elif mem < 12000:
rec_model = "openai/whisper-medium"
else:
rec_model = "openai/whisper-large"
# check if model is overridden
if lang_model is not None:
self.lang_model = lang_model
print("WARNING! Model override. Might not fit in your GPU")
else:
self.lang_model = rec_model
else:
"cpu"
print("Using the following model: ", self.lang_model)
# load model for inference
self.pipe = pipeline(
"automatic-speech-recognition",
model=self.lang_model,
chunk_length_s=30,
device=self.device,
)
if forced_decoder_ids is not None:
try:
self.pipe.model.config.forced_decoder_ids = forced_decoder_ids
except Exception as exception_text:
logger.info(
"Unable to set forced_decoder_ids parameter for whisper model"
f"Text of exception: {exception_text}"
"Therefore whisper model will use default mode for decoder"
)
def lazy_parse(self, blob: Blob) -> Iterator[Document]:
"""Lazily parse the blob."""
import io
try:
from pydub import AudioSegment
except ImportError:
raise ImportError(
"pydub package not found, please install it with `pip install pydub`"
)
try:
import librosa
except ImportError:
raise ImportError(
"librosa package not found, please install it with "
"`pip install librosa`"
)
# Audio file from disk
audio = AudioSegment.from_file(blob.path)
file_obj = io.BytesIO(audio.export(format="mp3").read())
# Transcribe
print(f"Transcribing part {blob.path}!")
y, sr = librosa.load(file_obj, sr=16000)
prediction = self.pipe(y.copy(), batch_size=8)["text"]
yield Document(
page_content=prediction,
metadata={"source": blob.source},
)
class YandexSTTParser(BaseBlobParser):
"""Transcribe and parse audio files.
    Audio transcription is done with the Yandex SpeechKit API."""
def __init__(
self,
*,
api_key: Optional[str] = None,
iam_token: Optional[str] = None,
model: str = "general",
language: str = "auto",
):
"""Initialize the parser.
Args:
api_key: API key for a service account
with the `ai.speechkit-stt.user` role.
iam_token: IAM token for a service account
with the `ai.speechkit-stt.user` role.
model: Recognition model name.
Defaults to general.
language: The language in ISO 639-1 format.
Defaults to automatic language recognition.
Either `api_key` or `iam_token` must be provided, but not both.
"""
if (api_key is None) == (iam_token is None):
raise ValueError(
"Either 'api_key' or 'iam_token' must be provided, but not both."
)
self.api_key = api_key
self.iam_token = iam_token
self.model = model
self.language = language
def lazy_parse(self, blob: Blob) -> Iterator[Document]:
"""Lazily parse the blob."""
try:
from speechkit import configure_credentials, creds, model_repository
from speechkit.stt import AudioProcessingType
except ImportError:
raise ImportError(
"yandex-speechkit package not found, please install it with "
"`pip install yandex-speechkit`"
)
try:
from pydub import AudioSegment
except ImportError:
raise ImportError(
"pydub package not found, please install it with " "`pip install pydub`"
)
if self.api_key:
configure_credentials(
yandex_credentials=creds.YandexCredentials(api_key=self.api_key)
)
else:
configure_credentials(
yandex_credentials=creds.YandexCredentials(iam_token=self.iam_token)
)
audio = AudioSegment.from_file(blob.path)
model = model_repository.recognition_model()
model.model = self.model
model.language = self.language
model.audio_processing_type = AudioProcessingType.Full
result = model.transcribe(audio)
for res in result:
yield Document(
page_content=res.normalized_text,
metadata={"source": blob.source},
)
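# A minimal usage sketch (not part of the original module): it runs the hosted
# Whisper parser over a local audio file. "meeting.mp3" is an illustrative path
# and an OpenAI API key is assumed to be available in the environment.
if __name__ == "__main__":
    parser = OpenAIWhisperParser()
    blob = Blob.from_path("meeting.mp3")
    for doc in parser.lazy_parse(blob):
        print(doc.metadata["chunk"], doc.page_content[:80])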
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~document_loaders~unstructured.py | """Loader that uses unstructured to load files."""
import collections
from abc import ABC, abstractmethod
from typing import IO, Any, Callable, Dict, List, Optional, Sequence, Union
from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
def satisfies_min_unstructured_version(min_version: str) -> bool:
"""Check if the installed `Unstructured` version exceeds the minimum version
for the feature in question."""
from unstructured.__version__ import __version__ as __unstructured_version__
min_version_tuple = tuple([int(x) for x in min_version.split(".")])
# NOTE(MthwRobinson) - enables the loader to work when you're using pre-release
# versions of unstructured like 0.4.17-dev1
_unstructured_version = __unstructured_version__.split("-")[0]
unstructured_version_tuple = tuple(
[int(x) for x in _unstructured_version.split(".")]
)
return unstructured_version_tuple >= min_version_tuple
def validate_unstructured_version(min_unstructured_version: str) -> None:
"""Raise an error if the `Unstructured` version does not exceed the
specified minimum."""
if not satisfies_min_unstructured_version(min_unstructured_version):
raise ValueError(
f"unstructured>={min_unstructured_version} is required in this loader."
)
class UnstructuredBaseLoader(BaseLoader, ABC):
"""Base Loader that uses `Unstructured`."""
def __init__(
self,
mode: str = "single",
post_processors: Optional[List[Callable]] = None,
**unstructured_kwargs: Any,
):
"""Initialize with file path."""
try:
import unstructured # noqa:F401
except ImportError:
raise ValueError(
"unstructured package not found, please install it with "
"`pip install unstructured`"
)
_valid_modes = {"single", "elements", "paged"}
if mode not in _valid_modes:
raise ValueError(
f"Got {mode} for `mode`, but should be one of `{_valid_modes}`"
)
self.mode = mode
if not satisfies_min_unstructured_version("0.5.4"):
if "strategy" in unstructured_kwargs:
unstructured_kwargs.pop("strategy")
self.unstructured_kwargs = unstructured_kwargs
self.post_processors = post_processors or []
@abstractmethod
def _get_elements(self) -> List:
"""Get elements."""
@abstractmethod
def _get_metadata(self) -> dict:
"""Get metadata."""
def _post_process_elements(self, elements: list) -> list:
"""Applies post processing functions to extracted unstructured elements.
        Post processing functions are str -> str callables that are passed
in using the post_processors kwarg when the loader is instantiated."""
for element in elements:
for post_processor in self.post_processors:
element.apply(post_processor)
return elements
def load(self) -> List[Document]:
"""Load file."""
elements = self._get_elements()
self._post_process_elements(elements)
if self.mode == "elements":
docs: List[Document] = list()
for element in elements:
metadata = self._get_metadata()
# NOTE(MthwRobinson) - the attribute check is for backward compatibility
                # with unstructured<0.4.9. The metadata attribute was added in 0.4.9.
if hasattr(element, "metadata"):
metadata.update(element.metadata.to_dict())
if hasattr(element, "category"):
metadata["category"] = element.category
docs.append(Document(page_content=str(element), metadata=metadata))
elif self.mode == "paged":
text_dict: Dict[int, str] = {}
meta_dict: Dict[int, Dict] = {}
for idx, element in enumerate(elements):
metadata = self._get_metadata()
if hasattr(element, "metadata"):
metadata.update(element.metadata.to_dict())
page_number = metadata.get("page_number", 1)
                # Check if this page_number already exists in text_dict
if page_number not in text_dict:
# If not, create new entry with initial text and metadata
text_dict[page_number] = str(element) + "\n\n"
meta_dict[page_number] = metadata
else:
# If exists, append to text and update the metadata
text_dict[page_number] += str(element) + "\n\n"
meta_dict[page_number].update(metadata)
# Convert the dict to a list of Document objects
docs = [
Document(page_content=text_dict[key], metadata=meta_dict[key])
for key in text_dict.keys()
]
elif self.mode == "single":
metadata = self._get_metadata()
text = "\n\n".join([str(el) for el in elements])
docs = [Document(page_content=text, metadata=metadata)]
else:
raise ValueError(f"mode of {self.mode} not supported.")
return docs
class UnstructuredFileLoader(UnstructuredBaseLoader):
"""Load files using `Unstructured`.
The file loader uses the
unstructured partition function and will automatically detect the file
type. You can run the loader in one of two modes: "single" and "elements".
If you use "single" mode, the document will be returned as a single
langchain Document object. If you use "elements" mode, the unstructured
library will split the document into elements such as Title and NarrativeText.
You can pass in additional unstructured kwargs after mode to apply
different unstructured settings.
Examples
--------
from langchain.document_loaders import UnstructuredFileLoader
loader = UnstructuredFileLoader(
"example.pdf", mode="elements", strategy="fast",
)
docs = loader.load()
References
----------
https://unstructured-io.github.io/unstructured/bricks.html#partition
"""
def __init__(
self,
file_path: Union[str, List[str]],
mode: str = "single",
**unstructured_kwargs: Any,
):
"""Initialize with file path."""
self.file_path = file_path
super().__init__(mode=mode, **unstructured_kwargs)
def _get_elements(self) -> List:
from unstructured.partition.auto import partition
return partition(filename=self.file_path, **self.unstructured_kwargs)
def _get_metadata(self) -> dict:
return {"source": self.file_path}
def get_elements_from_api(
file_path: Union[str, List[str], None] = None,
file: Union[IO, Sequence[IO], None] = None,
api_url: str = "https://api.unstructured.io/general/v0/general",
api_key: str = "",
**unstructured_kwargs: Any,
) -> List:
"""Retrieve a list of elements from the `Unstructured API`."""
if isinstance(file, collections.abc.Sequence) or isinstance(file_path, list):
from unstructured.partition.api import partition_multiple_via_api
_doc_elements = partition_multiple_via_api(
filenames=file_path,
files=file,
api_key=api_key,
api_url=api_url,
**unstructured_kwargs,
)
elements = []
for _elements in _doc_elements:
elements.extend(_elements)
return elements
else:
from unstructured.partition.api import partition_via_api
return partition_via_api(
filename=file_path,
file=file,
api_key=api_key,
api_url=api_url,
**unstructured_kwargs,
)
class UnstructuredAPIFileLoader(UnstructuredFileLoader):
"""Load files using `Unstructured` API.
By default, the loader makes a call to the hosted Unstructured API.
If you are running the unstructured API locally, you can change the
API rule by passing in the url parameter when you initialize the loader.
The hosted Unstructured API requires an API key. See
https://www.unstructured.io/api-key/ if you need to generate a key.
You can run the loader in one of two modes: "single" and "elements".
If you use "single" mode, the document will be returned as a single
langchain Document object. If you use "elements" mode, the unstructured
library will split the document into elements such as Title and NarrativeText.
You can pass in additional unstructured kwargs after mode to apply
different unstructured settings.
    Examples
    --------
    from langchain.document_loaders import UnstructuredAPIFileLoader
    loader = UnstructuredAPIFileLoader(
        "example.pdf", mode="elements", strategy="fast", api_key="MY_API_KEY",
    )
    docs = loader.load()
References
----------
https://unstructured-io.github.io/unstructured/bricks.html#partition
https://www.unstructured.io/api-key/
https://github.com/Unstructured-IO/unstructured-api
"""
def __init__(
self,
file_path: Union[str, List[str]] = "",
mode: str = "single",
url: str = "https://api.unstructured.io/general/v0/general",
api_key: str = "",
**unstructured_kwargs: Any,
):
"""Initialize with file path."""
validate_unstructured_version(min_unstructured_version="0.10.15")
self.url = url
self.api_key = api_key
super().__init__(file_path=file_path, mode=mode, **unstructured_kwargs)
def _get_metadata(self) -> dict:
return {"source": self.file_path}
def _get_elements(self) -> List:
return get_elements_from_api(
file_path=self.file_path,
api_key=self.api_key,
api_url=self.url,
**self.unstructured_kwargs,
)
class UnstructuredFileIOLoader(UnstructuredBaseLoader):
"""Load files using `Unstructured`.
The file loader
uses the unstructured partition function and will automatically detect the file
type. You can run the loader in one of two modes: "single" and "elements".
If you use "single" mode, the document will be returned as a single
langchain Document object. If you use "elements" mode, the unstructured
library will split the document into elements such as Title and NarrativeText.
You can pass in additional unstructured kwargs after mode to apply
different unstructured settings.
Examples
--------
from langchain.document_loaders import UnstructuredFileIOLoader
with open("example.pdf", "rb") as f:
loader = UnstructuredFileIOLoader(
f, mode="elements", strategy="fast",
)
docs = loader.load()
References
----------
https://unstructured-io.github.io/unstructured/bricks.html#partition
"""
def __init__(
self,
file: Union[IO, Sequence[IO]],
mode: str = "single",
**unstructured_kwargs: Any,
):
"""Initialize with file path."""
self.file = file
super().__init__(mode=mode, **unstructured_kwargs)
def _get_elements(self) -> List:
from unstructured.partition.auto import partition
return partition(file=self.file, **self.unstructured_kwargs)
def _get_metadata(self) -> dict:
return {}
class UnstructuredAPIFileIOLoader(UnstructuredFileIOLoader):
"""Load files using `Unstructured` API.
By default, the loader makes a call to the hosted Unstructured API.
If you are running the unstructured API locally, you can change the
API rule by passing in the url parameter when you initialize the loader.
The hosted Unstructured API requires an API key. See
https://www.unstructured.io/api-key/ if you need to generate a key.
You can run the loader in one of two modes: "single" and "elements".
If you use "single" mode, the document will be returned as a single
langchain Document object. If you use "elements" mode, the unstructured
library will split the document into elements such as Title and NarrativeText.
You can pass in additional unstructured kwargs after mode to apply
different unstructured settings.
Examples
--------
    from langchain.document_loaders import UnstructuredAPIFileIOLoader
    with open("example.pdf", "rb") as f:
        loader = UnstructuredAPIFileIOLoader(
f, mode="elements", strategy="fast", api_key="MY_API_KEY",
)
docs = loader.load()
References
----------
https://unstructured-io.github.io/unstructured/bricks.html#partition
https://www.unstructured.io/api-key/
https://github.com/Unstructured-IO/unstructured-api
"""
def __init__(
self,
file: Union[IO, Sequence[IO]],
mode: str = "single",
url: str = "https://api.unstructured.io/general/v0/general",
api_key: str = "",
**unstructured_kwargs: Any,
):
"""Initialize with file path."""
if isinstance(file, collections.abc.Sequence):
validate_unstructured_version(min_unstructured_version="0.6.3")
if file:
validate_unstructured_version(min_unstructured_version="0.6.2")
self.url = url
self.api_key = api_key
super().__init__(file=file, mode=mode, **unstructured_kwargs)
def _get_elements(self) -> List:
return get_elements_from_api(
file=self.file,
api_key=self.api_key,
api_url=self.url,
**self.unstructured_kwargs,
)
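# A minimal usage sketch (not part of the original module) showing the
# `post_processors` hook described above: each callable is applied to every
# extracted element before documents are built. "example.txt" is an
# illustrative path and the `unstructured` package must be installed.
if __name__ == "__main__":
    from unstructured.cleaners.core import clean_extra_whitespace

    loader = UnstructuredFileLoader(
        "example.txt",
        mode="elements",
        post_processors=[clean_extra_whitespace],
    )
    for doc in loader.load():
        print(doc.metadata.get("category"), doc.page_content)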
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~document_loaders~fauna.py | from typing import Iterator, List, Optional, Sequence
from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
class FaunaLoader(BaseLoader):
"""Load from `FaunaDB`.
Attributes:
query (str): The FQL query string to execute.
page_content_field (str): The field that contains the content of each page.
secret (str): The secret key for authenticating to FaunaDB.
metadata_fields (Optional[Sequence[str]]):
Optional list of field names to include in metadata.
"""
def __init__(
self,
query: str,
page_content_field: str,
secret: str,
metadata_fields: Optional[Sequence[str]] = None,
):
self.query = query
self.page_content_field = page_content_field
self.secret = secret
self.metadata_fields = metadata_fields
def load(self) -> List[Document]:
return list(self.lazy_load())
def lazy_load(self) -> Iterator[Document]:
try:
from fauna import Page, fql
from fauna.client import Client
from fauna.encoding import QuerySuccess
except ImportError:
raise ImportError(
"Could not import fauna python package. "
"Please install it with `pip install fauna`."
)
# Create Fauna Client
client = Client(secret=self.secret)
# Run FQL Query
response: QuerySuccess = client.query(fql(self.query))
page: Page = response.data
for result in page:
if result is not None:
document_dict = dict(result.items())
page_content = ""
for key, value in document_dict.items():
if key == self.page_content_field:
page_content = value
document: Document = Document(
page_content=page_content,
metadata={"id": result.id, "ts": result.ts},
)
yield document
if page.after is not None:
yield Document(
page_content="Next Page Exists",
metadata={"after": page.after},
)
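# A minimal usage sketch (not part of the original module): the FQL query,
# field name and secret below are illustrative placeholders.
if __name__ == "__main__":
    loader = FaunaLoader(
        query="Item.all()",
        page_content_field="text",
        secret="<fauna-secret-key>",
    )
    for doc in loader.lazy_load():
        print(doc.metadata["id"], doc.page_content[:80])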
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~tools~amadeus~flight_search.py | import logging
from datetime import datetime as dt
from typing import Dict, Optional, Type
from langchain.callbacks.manager import CallbackManagerForToolRun
from langchain.pydantic_v1 import BaseModel, Field
from langchain.tools.amadeus.base import AmadeusBaseTool
logger = logging.getLogger(__name__)
class FlightSearchSchema(BaseModel):
"""Schema for the AmadeusFlightSearch tool."""
originLocationCode: str = Field(
description=(
" The three letter International Air Transport "
" Association (IATA) Location Identifier for the "
" search's origin airport. "
)
)
destinationLocationCode: str = Field(
description=(
" The three letter International Air Transport "
" Association (IATA) Location Identifier for the "
" search's destination airport. "
)
)
departureDateTimeEarliest: str = Field(
description=(
" The earliest departure datetime from the origin airport "
" for the flight search in the following format: "
' "YYYY-MM-DDTHH:MM", where "T" separates the date and time '
' components. For example: "2023-06-09T10:30:00" represents '
" June 9th, 2023, at 10:30 AM. "
)
)
departureDateTimeLatest: str = Field(
description=(
" The latest departure datetime from the origin airport "
" for the flight search in the following format: "
' "YYYY-MM-DDTHH:MM", where "T" separates the date and time '
' components. For example: "2023-06-09T10:30:00" represents '
" June 9th, 2023, at 10:30 AM. "
)
)
page_number: int = Field(
default=1,
description="The specific page number of flight results to retrieve",
)
class AmadeusFlightSearch(AmadeusBaseTool):
"""Tool for searching for a single flight between two airports."""
name: str = "single_flight_search"
description: str = (
" Use this tool to search for a single flight between the origin and "
" destination airports at a departure between an earliest and "
" latest datetime. "
)
args_schema: Type[FlightSearchSchema] = FlightSearchSchema
def _run(
self,
originLocationCode: str,
destinationLocationCode: str,
departureDateTimeEarliest: str,
departureDateTimeLatest: str,
page_number: int = 1,
run_manager: Optional[CallbackManagerForToolRun] = None,
) -> list:
try:
from amadeus import ResponseError
except ImportError as e:
raise ImportError(
"Unable to import amadeus, please install with `pip install amadeus`."
) from e
RESULTS_PER_PAGE = 10
# Authenticate and retrieve a client
client = self.client
# Check that earliest and latest dates are in the same day
earliestDeparture = dt.strptime(departureDateTimeEarliest, "%Y-%m-%dT%H:%M:%S")
latestDeparture = dt.strptime(departureDateTimeLatest, "%Y-%m-%dT%H:%M:%S")
if earliestDeparture.date() != latestDeparture.date():
logger.error(
" Error: Earliest and latest departure dates need to be the "
" same date. If you're trying to search for round-trip "
" flights, call this function for the outbound flight first, "
" and then call again for the return flight. "
)
return [None]
# Collect all results from the API
try:
response = client.shopping.flight_offers_search.get(
originLocationCode=originLocationCode,
destinationLocationCode=destinationLocationCode,
departureDate=latestDeparture.strftime("%Y-%m-%d"),
adults=1,
)
except ResponseError as error:
            print(error)
            return [None]
# Generate output dictionary
output = []
for offer in response.data:
itinerary: Dict = {}
itinerary["price"] = {}
itinerary["price"]["total"] = offer["price"]["total"]
currency = offer["price"]["currency"]
currency = response.result["dictionaries"]["currencies"][currency]
itinerary["price"]["currency"] = {}
itinerary["price"]["currency"] = currency
segments = []
for segment in offer["itineraries"][0]["segments"]:
flight = {}
flight["departure"] = segment["departure"]
flight["arrival"] = segment["arrival"]
flight["flightNumber"] = segment["number"]
carrier = segment["carrierCode"]
carrier = response.result["dictionaries"]["carriers"][carrier]
flight["carrier"] = carrier
segments.append(flight)
itinerary["segments"] = []
itinerary["segments"] = segments
output.append(itinerary)
        # Filter out flights departing after the latest departure time.
        # (Build a new list instead of popping while iterating, which would skip items.)
        output = [
            offer
            for offer in output
            if dt.strptime(
                offer["segments"][0]["departure"]["at"], "%Y-%m-%dT%H:%M:%S"
            )
            <= latestDeparture
        ]
# Return the paginated results
startIndex = (page_number - 1) * RESULTS_PER_PAGE
endIndex = startIndex + RESULTS_PER_PAGE
return output[startIndex:endIndex]
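# A minimal usage sketch (not part of the original module): it assumes Amadeus
# API credentials are configured for the underlying client (e.g. via the
# AMADEUS_CLIENT_ID / AMADEUS_CLIENT_SECRET environment variables); the
# airports and dates below are illustrative only.
if __name__ == "__main__":
    tool = AmadeusFlightSearch()
    offers = tool.run(
        {
            "originLocationCode": "JFK",
            "destinationLocationCode": "LAX",
            "departureDateTimeEarliest": "2024-03-01T06:00:00",
            "departureDateTimeLatest": "2024-03-01T20:00:00",
        }
    )
    print(offers[:1])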
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~input.py | """DEPRECATED: Kept for backwards compatibility."""
from langchain.utils.input import (
get_bolded_text,
get_color_mapping,
get_colored_text,
print_text,
)
__all__ = [
"get_bolded_text",
"get_color_mapping",
"get_colored_text",
"print_text",
]
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~chains~combine_documents~reduce.py | """Combine many documents together by recursively reducing them."""
from __future__ import annotations
from typing import Any, Callable, List, Optional, Protocol, Tuple
from langchain.callbacks.manager import Callbacks
from langchain.chains.combine_documents.base import BaseCombineDocumentsChain
from langchain.docstore.document import Document
from langchain.pydantic_v1 import Extra
class CombineDocsProtocol(Protocol):
"""Interface for the combine_docs method."""
def __call__(self, docs: List[Document], **kwargs: Any) -> str:
"""Interface for the combine_docs method."""
class AsyncCombineDocsProtocol(Protocol):
"""Interface for the combine_docs method."""
async def __call__(self, docs: List[Document], **kwargs: Any) -> str:
"""Async interface for the combine_docs method."""
def _split_list_of_docs(
docs: List[Document], length_func: Callable, token_max: int, **kwargs: Any
) -> List[List[Document]]:
new_result_doc_list = []
_sub_result_docs = []
for doc in docs:
_sub_result_docs.append(doc)
_num_tokens = length_func(_sub_result_docs, **kwargs)
if _num_tokens > token_max:
if len(_sub_result_docs) == 1:
raise ValueError(
"A single document was longer than the context length,"
" we cannot handle this."
)
new_result_doc_list.append(_sub_result_docs[:-1])
_sub_result_docs = _sub_result_docs[-1:]
new_result_doc_list.append(_sub_result_docs)
return new_result_doc_list
def _collapse_docs(
docs: List[Document],
combine_document_func: CombineDocsProtocol,
**kwargs: Any,
) -> Document:
result = combine_document_func(docs, **kwargs)
combined_metadata = {k: str(v) for k, v in docs[0].metadata.items()}
for doc in docs[1:]:
for k, v in doc.metadata.items():
if k in combined_metadata:
combined_metadata[k] += f", {v}"
else:
combined_metadata[k] = str(v)
return Document(page_content=result, metadata=combined_metadata)
async def _acollapse_docs(
docs: List[Document],
combine_document_func: AsyncCombineDocsProtocol,
**kwargs: Any,
) -> Document:
result = await combine_document_func(docs, **kwargs)
combined_metadata = {k: str(v) for k, v in docs[0].metadata.items()}
for doc in docs[1:]:
for k, v in doc.metadata.items():
if k in combined_metadata:
combined_metadata[k] += f", {v}"
else:
combined_metadata[k] = str(v)
return Document(page_content=result, metadata=combined_metadata)
class ReduceDocumentsChain(BaseCombineDocumentsChain):
"""Combine documents by recursively reducing them.
This involves
- combine_documents_chain
- collapse_documents_chain
`combine_documents_chain` is ALWAYS provided. This is final chain that is called.
We pass all previous results to this chain, and the output of this chain is
returned as a final result.
`collapse_documents_chain` is used if the documents passed in are too many to all
be passed to `combine_documents_chain` in one go. In this case,
`collapse_documents_chain` is called recursively on as big of groups of documents
as are allowed.
Example:
.. code-block:: python
from langchain.chains import (
StuffDocumentsChain, LLMChain, ReduceDocumentsChain
)
from langchain.prompts import PromptTemplate
from langchain.llms import OpenAI
# This controls how each document will be formatted. Specifically,
# it will be passed to `format_document` - see that function for more
# details.
document_prompt = PromptTemplate(
input_variables=["page_content"],
template="{page_content}"
)
document_variable_name = "context"
llm = OpenAI()
# The prompt here should take as an input variable the
# `document_variable_name`
prompt = PromptTemplate.from_template(
"Summarize this content: {context}"
)
llm_chain = LLMChain(llm=llm, prompt=prompt)
combine_documents_chain = StuffDocumentsChain(
llm_chain=llm_chain,
document_prompt=document_prompt,
document_variable_name=document_variable_name
)
chain = ReduceDocumentsChain(
combine_documents_chain=combine_documents_chain,
)
# If we wanted to, we could also pass in collapse_documents_chain
# which is specifically aimed at collapsing documents BEFORE
# the final call.
prompt = PromptTemplate.from_template(
"Collapse this content: {context}"
)
llm_chain = LLMChain(llm=llm, prompt=prompt)
collapse_documents_chain = StuffDocumentsChain(
llm_chain=llm_chain,
document_prompt=document_prompt,
document_variable_name=document_variable_name
)
chain = ReduceDocumentsChain(
combine_documents_chain=combine_documents_chain,
collapse_documents_chain=collapse_documents_chain,
)
"""
combine_documents_chain: BaseCombineDocumentsChain
"""Final chain to call to combine documents.
This is typically a StuffDocumentsChain."""
collapse_documents_chain: Optional[BaseCombineDocumentsChain] = None
"""Chain to use to collapse documents if needed until they can all fit.
If None, will use the combine_documents_chain.
This is typically a StuffDocumentsChain."""
token_max: int = 3000
"""The maximum number of tokens to group documents into. For example, if
set to 3000 then documents will be grouped into chunks of no greater than
3000 tokens before trying to combine them into a smaller chunk."""
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
arbitrary_types_allowed = True
@property
def _collapse_chain(self) -> BaseCombineDocumentsChain:
if self.collapse_documents_chain is not None:
return self.collapse_documents_chain
else:
return self.combine_documents_chain
def combine_docs(
self,
docs: List[Document],
token_max: Optional[int] = None,
callbacks: Callbacks = None,
**kwargs: Any,
) -> Tuple[str, dict]:
"""Combine multiple documents recursively.
Args:
docs: List of documents to combine, assumed that each one is less than
`token_max`.
token_max: Recursively creates groups of documents less than this number
of tokens.
callbacks: Callbacks to be passed through
**kwargs: additional parameters to be passed to LLM calls (like other
input variables besides the documents)
Returns:
The first element returned is the single string output. The second
element returned is a dictionary of other keys to return.
"""
result_docs, extra_return_dict = self._collapse(
docs, token_max=token_max, callbacks=callbacks, **kwargs
)
return self.combine_documents_chain.combine_docs(
docs=result_docs, callbacks=callbacks, **kwargs
)
async def acombine_docs(
self,
docs: List[Document],
token_max: Optional[int] = None,
callbacks: Callbacks = None,
**kwargs: Any,
) -> Tuple[str, dict]:
"""Async combine multiple documents recursively.
Args:
docs: List of documents to combine, assumed that each one is less than
`token_max`.
token_max: Recursively creates groups of documents less than this number
of tokens.
callbacks: Callbacks to be passed through
**kwargs: additional parameters to be passed to LLM calls (like other
input variables besides the documents)
Returns:
The first element returned is the single string output. The second
element returned is a dictionary of other keys to return.
"""
result_docs, extra_return_dict = await self._acollapse(
docs, token_max=token_max, callbacks=callbacks, **kwargs
)
return await self.combine_documents_chain.acombine_docs(
docs=result_docs, callbacks=callbacks, **kwargs
)
def _collapse(
self,
docs: List[Document],
token_max: Optional[int] = None,
callbacks: Callbacks = None,
**kwargs: Any,
) -> Tuple[List[Document], dict]:
result_docs = docs
length_func = self.combine_documents_chain.prompt_length
num_tokens = length_func(result_docs, **kwargs)
def _collapse_docs_func(docs: List[Document], **kwargs: Any) -> str:
return self._collapse_chain.run(
input_documents=docs, callbacks=callbacks, **kwargs
)
_token_max = token_max or self.token_max
while num_tokens is not None and num_tokens > _token_max:
new_result_doc_list = _split_list_of_docs(
result_docs, length_func, _token_max, **kwargs
)
result_docs = []
for docs in new_result_doc_list:
new_doc = _collapse_docs(docs, _collapse_docs_func, **kwargs)
result_docs.append(new_doc)
num_tokens = length_func(result_docs, **kwargs)
return result_docs, {}
async def _acollapse(
self,
docs: List[Document],
token_max: Optional[int] = None,
callbacks: Callbacks = None,
**kwargs: Any,
) -> Tuple[List[Document], dict]:
result_docs = docs
length_func = self.combine_documents_chain.prompt_length
num_tokens = length_func(result_docs, **kwargs)
async def _collapse_docs_func(docs: List[Document], **kwargs: Any) -> str:
return await self._collapse_chain.arun(
input_documents=docs, callbacks=callbacks, **kwargs
)
_token_max = token_max or self.token_max
while num_tokens is not None and num_tokens > _token_max:
new_result_doc_list = _split_list_of_docs(
result_docs, length_func, _token_max, **kwargs
)
result_docs = []
for docs in new_result_doc_list:
new_doc = await _acollapse_docs(docs, _collapse_docs_func, **kwargs)
result_docs.append(new_doc)
num_tokens = length_func(result_docs, **kwargs)
return result_docs, {}
@property
def _chain_type(self) -> str:
return "reduce_documents_chain"
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~memory~zep_memory.py | from __future__ import annotations
from typing import Any, Dict, Optional
from langchain.memory import ConversationBufferMemory
from langchain.memory.chat_message_histories import ZepChatMessageHistory
class ZepMemory(ConversationBufferMemory):
"""Persist your chain history to the Zep MemoryStore.
The number of messages returned by Zep and when the Zep server summarizes chat
histories is configurable. See the Zep documentation for more details.
Documentation: https://docs.getzep.com
Example:
.. code-block:: python
memory = ZepMemory(
session_id=session_id, # Identifies your user or a user's session
url=ZEP_API_URL, # Your Zep server's URL
api_key=<your_api_key>, # Optional
memory_key="history", # Ensure this matches the key used in
# chain's prompt template
return_messages=True, # Does your prompt template expect a string
# or a list of Messages?
)
chain = LLMChain(memory=memory,...) # Configure your chain to use the ZepMemory
instance
Note:
        To persist metadata alongside your chat history, you will need to create a
custom Chain class that overrides the `prep_outputs` method to include the metadata
in the call to `self.memory.save_context`.
Zep - Fast, scalable building blocks for LLM Apps
=========
Zep is an open source platform for productionizing LLM apps. Go from a prototype
built in LangChain or LlamaIndex, or a custom app, to production in minutes without
rewriting code.
For server installation instructions and more, see:
https://docs.getzep.com/deployment/quickstart/
For more information on the zep-python package, see:
https://github.com/getzep/zep-python
"""
chat_memory: ZepChatMessageHistory
def __init__(
self,
session_id: str,
url: str = "http://localhost:8000",
api_key: Optional[str] = None,
output_key: Optional[str] = None,
input_key: Optional[str] = None,
return_messages: bool = False,
human_prefix: str = "Human",
ai_prefix: str = "AI",
memory_key: str = "history",
):
"""Initialize ZepMemory.
Args:
session_id (str): Identifies your user or a user's session
url (str, optional): Your Zep server's URL. Defaults to
"http://localhost:8000".
api_key (Optional[str], optional): Your Zep API key. Defaults to None.
output_key (Optional[str], optional): The key to use for the output message.
Defaults to None.
input_key (Optional[str], optional): The key to use for the input message.
Defaults to None.
return_messages (bool, optional): Does your prompt template expect a string
or a list of Messages? Defaults to False
i.e. return a string.
human_prefix (str, optional): The prefix to use for human messages.
Defaults to "Human".
ai_prefix (str, optional): The prefix to use for AI messages.
Defaults to "AI".
memory_key (str, optional): The key to use for the memory.
Defaults to "history".
Ensure that this matches the key used in
chain's prompt template.
"""
chat_message_history = ZepChatMessageHistory(
session_id=session_id,
url=url,
api_key=api_key,
)
super().__init__(
chat_memory=chat_message_history,
output_key=output_key,
input_key=input_key,
return_messages=return_messages,
human_prefix=human_prefix,
ai_prefix=ai_prefix,
memory_key=memory_key,
)
def save_context(
self,
inputs: Dict[str, Any],
outputs: Dict[str, str],
metadata: Optional[Dict[str, Any]] = None,
) -> None:
"""Save context from this conversation to buffer.
Args:
inputs (Dict[str, Any]): The inputs to the chain.
outputs (Dict[str, str]): The outputs from the chain.
metadata (Optional[Dict[str, Any]], optional): Any metadata to save with
the context. Defaults to None
Returns:
None
"""
input_str, output_str = self._get_input_output(inputs, outputs)
self.chat_memory.add_user_message(input_str, metadata=metadata)
self.chat_memory.add_ai_message(output_str, metadata=metadata)
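# A minimal usage sketch (not part of the original module): it assumes a Zep
# server is reachable at the given URL; the session id, texts and metadata are
# illustrative placeholders.
if __name__ == "__main__":
    memory = ZepMemory(session_id="example-session", url="http://localhost:8000")
    memory.save_context(
        {"input": "hello"},
        {"output": "hi there"},
        metadata={"channel": "demo"},
    )
    print(memory.load_memory_variables({}))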
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~tools~multion~update_session.py | from typing import TYPE_CHECKING, Optional, Type
from langchain.callbacks.manager import CallbackManagerForToolRun
from langchain.pydantic_v1 import BaseModel, Field
from langchain.tools.base import BaseTool
if TYPE_CHECKING:
# This is for linting and IDE typehints
import multion
else:
try:
# We do this so pydantic can resolve the types when instantiating
import multion
except ImportError:
pass
class UpdateSessionSchema(BaseModel):
"""Input for UpdateSessionTool."""
tabId: str = Field(
..., description="The tabID, received from one of the createSessions run before"
)
query: str = Field(
...,
description="The query to run in multion agent.",
)
url: str = Field(
"https://www.google.com/",
description="""The Url to run the agent at. \
Note: accepts only secure links having https://""",
)
class MultionUpdateSession(BaseTool):
"""Tool that updates an existing Multion Browser Window with provided fields.
Attributes:
name: The name of the tool. Default: "update_multion_session"
description: The description of the tool.
args_schema: The schema for the tool's arguments. Default: UpdateSessionSchema
"""
name: str = "update_multion_session"
description: str = """Use this tool to update \
an existing corresponding Multion Browser Window with provided fields. \
Note: TabId must be received from previous Browser window creation."""
args_schema: Type[UpdateSessionSchema] = UpdateSessionSchema
tabId: str = ""
def _run(
self,
tabId: str,
query: str,
url: Optional[str] = "https://www.google.com/",
run_manager: Optional[CallbackManagerForToolRun] = None,
) -> dict:
try:
try:
response = multion.update_session(tabId, {"input": query, "url": url})
content = {"tabId": tabId, "Response": response["message"]}
self.tabId = tabId
return content
except Exception as e:
print(f"{e}, retrying...")
return {"error": f"{e}", "Response": "retrying..."}
# response = multion.new_session({"input": query, "url": url})
# self.tabID = response["tabId"]
# return {"tabId": response["tabId"], "Response": response["message"]}
except Exception as e:
raise Exception(f"An error occurred: {e}")
| [
"Use this tool to update an existing corresponding Multion Browser Window with provided fields. Note: TabId must be received from previous Browser window creation."
] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~llms~octoai_endpoint.py | from typing import Any, Dict, List, Mapping, Optional
from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.llms.base import LLM
from langchain.llms.utils import enforce_stop_tokens
from langchain.pydantic_v1 import Extra, root_validator
from langchain.utils import get_from_dict_or_env
class OctoAIEndpoint(LLM):
"""OctoAI LLM Endpoints.
OctoAIEndpoint is a class to interact with OctoAI
Compute Service large language model endpoints.
To use, you should have the ``octoai`` python package installed, and the
environment variable ``OCTOAI_API_TOKEN`` set with your API token, or pass
it as a named parameter to the constructor.
Example:
.. code-block:: python
from langchain.llms.octoai_endpoint import OctoAIEndpoint
OctoAIEndpoint(
octoai_api_token="octoai-api-key",
endpoint_url="https://mpt-7b-demo-f1kzsig6xes9.octoai.run/generate",
model_kwargs={
"max_new_tokens": 200,
"temperature": 0.75,
"top_p": 0.95,
"repetition_penalty": 1,
"seed": None,
"stop": [],
},
)
from langchain.llms.octoai_endpoint import OctoAIEndpoint
OctoAIEndpoint(
octoai_api_token="octoai-api-key",
endpoint_url="https://llama-2-7b-chat-demo-kk0powt97tmb.octoai.run/v1/chat/completions",
model_kwargs={
"model": "llama-2-7b-chat",
"messages": [
{
"role": "system",
"content": "Below is an instruction that describes a task.
Write a response that completes the request."
}
],
"stream": False,
"max_tokens": 256
}
)
"""
endpoint_url: Optional[str] = None
"""Endpoint URL to use."""
model_kwargs: Optional[dict] = None
"""Keyword arguments to pass to the model."""
octoai_api_token: Optional[str] = None
"""OCTOAI API Token"""
streaming: bool = False
"""Whether to generate a stream of tokens asynchronously"""
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
@root_validator(allow_reuse=True)
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""
octoai_api_token = get_from_dict_or_env(
values, "octoai_api_token", "OCTOAI_API_TOKEN"
)
values["endpoint_url"] = get_from_dict_or_env(
values, "endpoint_url", "ENDPOINT_URL"
)
values["octoai_api_token"] = octoai_api_token
return values
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
_model_kwargs = self.model_kwargs or {}
return {
**{"endpoint_url": self.endpoint_url},
**{"model_kwargs": _model_kwargs},
}
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "octoai_endpoint"
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
"""Call out to OctoAI's inference endpoint.
Args:
prompt: The prompt to pass into the model.
stop: Optional list of stop words to use when generating.
Returns:
The string generated by the model.
"""
_model_kwargs = self.model_kwargs or {}
try:
# Initialize the OctoAI client
from octoai import client
octoai_client = client.Client(token=self.octoai_api_token)
if "model" in _model_kwargs and "llama-2" in _model_kwargs["model"]:
parameter_payload = _model_kwargs
parameter_payload["messages"].append(
{"role": "user", "content": prompt}
)
# Send the request using the OctoAI client
output = octoai_client.infer(self.endpoint_url, parameter_payload)
text = output.get("choices")[0].get("message").get("content")
else:
# Prepare the payload JSON
parameter_payload = {"inputs": prompt, "parameters": _model_kwargs}
# Send the request using the OctoAI client
resp_json = octoai_client.infer(self.endpoint_url, parameter_payload)
text = resp_json["generated_text"]
except Exception as e:
# Handle any errors raised by the inference endpoint
raise ValueError(f"Error raised by the inference endpoint: {e}") from e
if stop is not None:
# Apply stop tokens when making calls to OctoAI
text = enforce_stop_tokens(text, stop)
return text
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~document_loaders~dropbox.py | # Prerequisites:
# 1. Create a Dropbox app.
# 2. Give the app these scope permissions: `files.metadata.read`
# and `files.content.read`.
# 3. Generate access token: https://www.dropbox.com/developers/apps/create.
# 4. `pip install dropbox` (requires `pip install unstructured` for PDF filetype).
import os
import tempfile
from pathlib import Path
from typing import Any, Dict, List, Optional
from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
from langchain.pydantic_v1 import BaseModel, root_validator
class DropboxLoader(BaseLoader, BaseModel):
"""Load files from `Dropbox`.
In addition to common files such as text and PDF files, it also supports
*Dropbox Paper* files.
"""
dropbox_access_token: str
"""Dropbox access token."""
dropbox_folder_path: Optional[str] = None
"""The folder path to load from."""
dropbox_file_paths: Optional[List[str]] = None
"""The file paths to load from."""
recursive: bool = False
"""Flag to indicate whether to load files recursively from subfolders."""
@root_validator
def validate_inputs(cls, values: Dict[str, Any]) -> Dict[str, Any]:
"""Validate that either folder_path or file_paths is set, but not both."""
if (
values.get("dropbox_folder_path") is not None
and values.get("dropbox_file_paths") is not None
):
raise ValueError("Cannot specify both folder_path and file_paths")
if values.get("dropbox_folder_path") is None and not values.get(
"dropbox_file_paths"
):
raise ValueError("Must specify either folder_path or file_paths")
return values
def _create_dropbox_client(self) -> Any:
"""Create a Dropbox client."""
try:
from dropbox import Dropbox, exceptions
except ImportError:
raise ImportError("You must run " "`pip install dropbox")
try:
dbx = Dropbox(self.dropbox_access_token)
dbx.users_get_current_account()
except exceptions.AuthError as ex:
raise ValueError(
"Invalid Dropbox access token. Please verify your token and try again."
) from ex
return dbx
def _load_documents_from_folder(self, folder_path: str) -> List[Document]:
"""Load documents from a Dropbox folder."""
dbx = self._create_dropbox_client()
try:
from dropbox import exceptions
from dropbox.files import FileMetadata
except ImportError:
raise ImportError("You must run " "`pip install dropbox")
try:
results = dbx.files_list_folder(folder_path, recursive=self.recursive)
except exceptions.ApiError as ex:
raise ValueError(
f"Could not list files in the folder: {folder_path}. "
"Please verify the folder path and try again."
) from ex
files = [entry for entry in results.entries if isinstance(entry, FileMetadata)]
documents = [
doc
for doc in (self._load_file_from_path(file.path_display) for file in files)
if doc is not None
]
return documents
def _load_file_from_path(self, file_path: str) -> Optional[Document]:
"""Load a file from a Dropbox path."""
dbx = self._create_dropbox_client()
try:
from dropbox import exceptions
except ImportError:
raise ImportError("You must run " "`pip install dropbox")
try:
file_metadata = dbx.files_get_metadata(file_path)
if file_metadata.is_downloadable:
_, response = dbx.files_download(file_path)
# Some types such as Paper, need to be exported.
elif file_metadata.export_info:
_, response = dbx.files_export(file_path, "markdown")
except exceptions.ApiError as ex:
raise ValueError(
f"Could not load file: {file_path}. Please verify the file path"
"and try again."
) from ex
try:
text = response.content.decode("utf-8")
except UnicodeDecodeError:
print(f"File {file_path} could not be decoded as text. Skipping.")
file_extension = os.path.splitext(file_path)[1].lower()
if file_extension == ".pdf":
from langchain.document_loaders import UnstructuredPDFLoader
# Download it to a temporary file.
temp_dir = tempfile.TemporaryDirectory()
temp_pdf = Path(temp_dir.name) / "tmp.pdf"
with open(temp_pdf, mode="wb") as f:
f.write(response.content)
try:
loader = UnstructuredPDFLoader(str(temp_pdf))
docs = loader.load()
if docs:
return docs[0]
except Exception as pdf_ex:
print(f"Error while trying to parse PDF {file_path}: {pdf_ex}")
return None
return None
metadata = {
"source": f"dropbox://{file_path}",
"title": os.path.basename(file_path),
}
return Document(page_content=text, metadata=metadata)
def _load_documents_from_paths(self) -> List[Document]:
"""Load documents from a list of Dropbox file paths."""
if not self.dropbox_file_paths:
raise ValueError("file_paths must be set")
return [
doc
for doc in (
self._load_file_from_path(file_path)
for file_path in self.dropbox_file_paths
)
if doc is not None
]
def load(self) -> List[Document]:
"""Load documents."""
if self.dropbox_folder_path is not None:
return self._load_documents_from_folder(self.dropbox_folder_path)
else:
return self._load_documents_from_paths()
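# A minimal usage sketch (not part of the original module): the access token is
# a placeholder and "" refers to the root folder of the linked Dropbox account.
if __name__ == "__main__":
    loader = DropboxLoader(
        dropbox_access_token="<dropbox-access-token>",
        dropbox_folder_path="",
        recursive=False,
    )
    for doc in loader.load():
        print(doc.metadata["source"])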
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~utilities~sql_database.py | """SQLAlchemy wrapper around a database."""
from __future__ import annotations
import warnings
from typing import Any, Dict, Iterable, List, Literal, Optional, Sequence, Union
import sqlalchemy
from sqlalchemy import MetaData, Table, create_engine, inspect, select, text
from sqlalchemy.engine import Engine
from sqlalchemy.exc import ProgrammingError, SQLAlchemyError
from sqlalchemy.schema import CreateTable
from sqlalchemy.types import NullType
from langchain.utils import get_from_env
def _format_index(index: sqlalchemy.engine.interfaces.ReflectedIndex) -> str:
return (
f'Name: {index["name"]}, Unique: {index["unique"]},'
f' Columns: {str(index["column_names"])}'
)
def truncate_word(content: Any, *, length: int, suffix: str = "...") -> str:
"""
Truncate a string to a certain number of words, based on the max string
length.
"""
if not isinstance(content, str) or length <= 0:
return content
if len(content) <= length:
return content
return content[: length - len(suffix)].rsplit(" ", 1)[0] + suffix
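# Example behaviour of the helper above (illustrative):
#   truncate_word("The quick brown fox jumps", length=15) -> "The quick..."
#   truncate_word("short", length=15) -> "short"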
class SQLDatabase:
"""SQLAlchemy wrapper around a database."""
def __init__(
self,
engine: Engine,
schema: Optional[str] = None,
metadata: Optional[MetaData] = None,
ignore_tables: Optional[List[str]] = None,
include_tables: Optional[List[str]] = None,
sample_rows_in_table_info: int = 3,
indexes_in_table_info: bool = False,
custom_table_info: Optional[dict] = None,
view_support: bool = False,
max_string_length: int = 300,
):
"""Create engine from database URI."""
self._engine = engine
self._schema = schema
if include_tables and ignore_tables:
raise ValueError("Cannot specify both include_tables and ignore_tables")
self._inspector = inspect(self._engine)
# including view support by adding the views as well as tables to the all
# tables list if view_support is True
self._all_tables = set(
self._inspector.get_table_names(schema=schema)
+ (self._inspector.get_view_names(schema=schema) if view_support else [])
)
self._include_tables = set(include_tables) if include_tables else set()
if self._include_tables:
missing_tables = self._include_tables - self._all_tables
if missing_tables:
raise ValueError(
f"include_tables {missing_tables} not found in database"
)
self._ignore_tables = set(ignore_tables) if ignore_tables else set()
if self._ignore_tables:
missing_tables = self._ignore_tables - self._all_tables
if missing_tables:
raise ValueError(
f"ignore_tables {missing_tables} not found in database"
)
usable_tables = self.get_usable_table_names()
self._usable_tables = set(usable_tables) if usable_tables else self._all_tables
if not isinstance(sample_rows_in_table_info, int):
raise TypeError("sample_rows_in_table_info must be an integer")
self._sample_rows_in_table_info = sample_rows_in_table_info
self._indexes_in_table_info = indexes_in_table_info
self._custom_table_info = custom_table_info
if self._custom_table_info:
if not isinstance(self._custom_table_info, dict):
raise TypeError(
"table_info must be a dictionary with table names as keys and the "
"desired table info as values"
)
# only keep the tables that are also present in the database
intersection = set(self._custom_table_info).intersection(self._all_tables)
self._custom_table_info = dict(
(table, self._custom_table_info[table])
for table in self._custom_table_info
if table in intersection
)
self._max_string_length = max_string_length
self._metadata = metadata or MetaData()
# including view support if view_support = true
self._metadata.reflect(
views=view_support,
bind=self._engine,
only=list(self._usable_tables),
schema=self._schema,
)
@classmethod
def from_uri(
cls, database_uri: str, engine_args: Optional[dict] = None, **kwargs: Any
) -> SQLDatabase:
"""Construct a SQLAlchemy engine from URI."""
_engine_args = engine_args or {}
return cls(create_engine(database_uri, **_engine_args), **kwargs)
@classmethod
def from_databricks(
cls,
catalog: str,
schema: str,
host: Optional[str] = None,
api_token: Optional[str] = None,
warehouse_id: Optional[str] = None,
cluster_id: Optional[str] = None,
engine_args: Optional[dict] = None,
**kwargs: Any,
) -> SQLDatabase:
"""
Class method to create an SQLDatabase instance from a Databricks connection.
This method requires the 'databricks-sql-connector' package. If not installed,
it can be added using `pip install databricks-sql-connector`.
Args:
catalog (str): The catalog name in the Databricks database.
schema (str): The schema name in the catalog.
host (Optional[str]): The Databricks workspace hostname, excluding
'https://' part. If not provided, it attempts to fetch from the
environment variable 'DATABRICKS_HOST'. If still unavailable and if
running in a Databricks notebook, it defaults to the current workspace
hostname. Defaults to None.
api_token (Optional[str]): The Databricks personal access token for
accessing the Databricks SQL warehouse or the cluster. If not provided,
it attempts to fetch from 'DATABRICKS_TOKEN'. If still unavailable
and running in a Databricks notebook, a temporary token for the current
user is generated. Defaults to None.
warehouse_id (Optional[str]): The warehouse ID in the Databricks SQL. If
provided, the method configures the connection to use this warehouse.
Cannot be used with 'cluster_id'. Defaults to None.
cluster_id (Optional[str]): The cluster ID in the Databricks Runtime. If
provided, the method configures the connection to use this cluster.
Cannot be used with 'warehouse_id'. If running in a Databricks notebook
and both 'warehouse_id' and 'cluster_id' are None, it uses the ID of the
cluster the notebook is attached to. Defaults to None.
engine_args (Optional[dict]): The arguments to be used when connecting
Databricks. Defaults to None.
**kwargs (Any): Additional keyword arguments for the `from_uri` method.
Returns:
SQLDatabase: An instance of SQLDatabase configured with the provided
Databricks connection details.
Raises:
ValueError: If 'databricks-sql-connector' is not found, or if both
'warehouse_id' and 'cluster_id' are provided, or if neither
'warehouse_id' nor 'cluster_id' are provided and it's not executing
inside a Databricks notebook.
"""
try:
from databricks import sql # noqa: F401
except ImportError:
raise ValueError(
"databricks-sql-connector package not found, please install with"
" `pip install databricks-sql-connector`"
)
context = None
try:
from dbruntime.databricks_repl_context import get_context
context = get_context()
except ImportError:
pass
default_host = context.browserHostName if context else None
if host is None:
host = get_from_env("host", "DATABRICKS_HOST", default_host)
default_api_token = context.apiToken if context else None
if api_token is None:
api_token = get_from_env("api_token", "DATABRICKS_TOKEN", default_api_token)
if warehouse_id is None and cluster_id is None:
if context:
cluster_id = context.clusterId
else:
raise ValueError(
"Need to provide either 'warehouse_id' or 'cluster_id'."
)
if warehouse_id and cluster_id:
raise ValueError("Can't have both 'warehouse_id' or 'cluster_id'.")
if warehouse_id:
http_path = f"/sql/1.0/warehouses/{warehouse_id}"
else:
http_path = f"/sql/protocolv1/o/0/{cluster_id}"
uri = (
f"databricks://token:{api_token}@{host}?"
f"http_path={http_path}&catalog={catalog}&schema={schema}"
)
return cls.from_uri(database_uri=uri, engine_args=engine_args, **kwargs)
@classmethod
def from_cnosdb(
cls,
url: str = "127.0.0.1:8902",
user: str = "root",
password: str = "",
tenant: str = "cnosdb",
database: str = "public",
) -> SQLDatabase:
"""
Class method to create an SQLDatabase instance from a CnosDB connection.
This method requires the 'cnos-connector' package. If not installed, it
can be added using `pip install cnos-connector`.
Args:
url (str): The HTTP connection host name and port number of the CnosDB
service, excluding "http://" or "https://", with a default value
of "127.0.0.1:8902".
user (str): The username used to connect to the CnosDB service, with a
default value of "root".
password (str): The password of the user connecting to the CnosDB service,
with a default value of "".
tenant (str): The name of the tenant used to connect to the CnosDB service,
with a default value of "cnosdb".
database (str): The name of the database in the CnosDB tenant.
Returns:
SQLDatabase: An instance of SQLDatabase configured with the provided
CnosDB connection details.
"""
try:
from cnosdb_connector import make_cnosdb_langchain_uri
uri = make_cnosdb_langchain_uri(url, user, password, tenant, database)
return cls.from_uri(database_uri=uri)
except ImportError:
raise ValueError(
"cnos-connector package not found, please install with"
" `pip install cnos-connector`"
)
@property
def dialect(self) -> str:
"""Return string representation of dialect to use."""
return self._engine.dialect.name
def get_usable_table_names(self) -> Iterable[str]:
"""Get names of tables available."""
if self._include_tables:
return sorted(self._include_tables)
return sorted(self._all_tables - self._ignore_tables)
def get_table_names(self) -> Iterable[str]:
"""Get names of tables available."""
warnings.warn(
"This method is deprecated - please use `get_usable_table_names`."
)
return self.get_usable_table_names()
@property
def table_info(self) -> str:
"""Information about all tables in the database."""
return self.get_table_info()
def get_table_info(self, table_names: Optional[List[str]] = None) -> str:
"""Get information about specified tables.
Follows best practices as specified in: Rajkumar et al, 2022
(https://arxiv.org/abs/2204.00498)
If `sample_rows_in_table_info`, the specified number of sample rows will be
appended to each table description. This can increase performance as
demonstrated in the paper.
"""
all_table_names = self.get_usable_table_names()
if table_names is not None:
missing_tables = set(table_names).difference(all_table_names)
if missing_tables:
raise ValueError(f"table_names {missing_tables} not found in database")
all_table_names = table_names
meta_tables = [
tbl
for tbl in self._metadata.sorted_tables
if tbl.name in set(all_table_names)
and not (self.dialect == "sqlite" and tbl.name.startswith("sqlite_"))
]
tables = []
for table in meta_tables:
if self._custom_table_info and table.name in self._custom_table_info:
tables.append(self._custom_table_info[table.name])
continue
# Ignore JSON datatyped columns
for k, v in table.columns.items():
if type(v.type) is NullType:
table._columns.remove(v)
# add create table command
create_table = str(CreateTable(table).compile(self._engine))
table_info = f"{create_table.rstrip()}"
has_extra_info = (
self._indexes_in_table_info or self._sample_rows_in_table_info
)
if has_extra_info:
table_info += "\n\n/*"
if self._indexes_in_table_info:
table_info += f"\n{self._get_table_indexes(table)}\n"
if self._sample_rows_in_table_info:
table_info += f"\n{self._get_sample_rows(table)}\n"
if has_extra_info:
table_info += "*/"
tables.append(table_info)
tables.sort()
final_str = "\n\n".join(tables)
return final_str
def _get_table_indexes(self, table: Table) -> str:
indexes = self._inspector.get_indexes(table.name)
indexes_formatted = "\n".join(map(_format_index, indexes))
return f"Table Indexes:\n{indexes_formatted}"
def _get_sample_rows(self, table: Table) -> str:
# build the select command
command = select(table).limit(self._sample_rows_in_table_info)
# save the columns in string format
columns_str = "\t".join([col.name for col in table.columns])
try:
# get the sample rows
with self._engine.connect() as connection:
sample_rows_result = connection.execute(command) # type: ignore
# shorten values in the sample rows
sample_rows = list(
map(lambda ls: [str(i)[:100] for i in ls], sample_rows_result)
)
# save the sample rows in string format
sample_rows_str = "\n".join(["\t".join(row) for row in sample_rows])
# in some dialects when there are no rows in the table a
# 'ProgrammingError' is returned
except ProgrammingError:
sample_rows_str = ""
return (
f"{self._sample_rows_in_table_info} rows from {table.name} table:\n"
f"{columns_str}\n"
f"{sample_rows_str}"
)
def _execute(
self,
command: str,
fetch: Union[Literal["all"], Literal["one"]] = "all",
) -> Sequence[Dict[str, Any]]:
"""
Executes SQL command through underlying engine.
If the statement returns no rows, an empty list is returned.
"""
with self._engine.begin() as connection:
if self._schema is not None:
if self.dialect == "snowflake":
connection.exec_driver_sql(
"ALTER SESSION SET search_path = %s", (self._schema,)
)
elif self.dialect == "bigquery":
connection.exec_driver_sql("SET @@dataset_id=?", (self._schema,))
elif self.dialect == "mssql":
pass
elif self.dialect == "trino":
connection.exec_driver_sql("USE ?", (self._schema,))
elif self.dialect == "duckdb":
# Unclear which parameterized argument syntax duckdb supports.
# The docs for the duckdb client say they support multiple,
# but `duckdb_engine` seemed to struggle with all of them:
# https://github.com/Mause/duckdb_engine/issues/796
connection.exec_driver_sql(f"SET search_path TO {self._schema}")
else: # postgresql and other compatible dialects
connection.exec_driver_sql("SET search_path TO %s", (self._schema,))
cursor = connection.execute(text(command))
if cursor.returns_rows:
if fetch == "all":
result = [x._asdict() for x in cursor.fetchall()]
elif fetch == "one":
first_result = cursor.fetchone()
result = [] if first_result is None else [first_result._asdict()]
else:
raise ValueError("Fetch parameter must be either 'one' or 'all'")
return result
return []
def run(
self,
command: str,
fetch: Union[Literal["all"], Literal["one"]] = "all",
) -> str:
"""Execute a SQL command and return a string representing the results.
If the statement returns rows, a string of the results is returned.
If the statement returns no rows, an empty string is returned.
"""
result = self._execute(command, fetch)
        # Convert column values to strings to avoid issues with sqlalchemy
        # truncating text
res = [
tuple(truncate_word(c, length=self._max_string_length) for c in r.values())
for r in result
]
if not res:
return ""
else:
return str(res)
def get_table_info_no_throw(self, table_names: Optional[List[str]] = None) -> str:
"""Get information about specified tables.
Follows best practices as specified in: Rajkumar et al, 2022
(https://arxiv.org/abs/2204.00498)
If `sample_rows_in_table_info`, the specified number of sample rows will be
appended to each table description. This can increase performance as
demonstrated in the paper.
"""
try:
return self.get_table_info(table_names)
except ValueError as e:
"""Format the error message"""
return f"Error: {e}"
def run_no_throw(
self,
command: str,
fetch: Union[Literal["all"], Literal["one"]] = "all",
) -> str:
"""Execute a SQL command and return a string representing the results.
If the statement returns rows, a string of the results is returned.
If the statement returns no rows, an empty string is returned.
If the statement throws an error, the error message is returned.
"""
try:
return self.run(command, fetch)
except SQLAlchemyError as e:
"""Format the error message"""
return f"Error: {e}"
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~document_loaders~python.py | import tokenize
from langchain.document_loaders.text import TextLoader
class PythonLoader(TextLoader):
"""Load `Python` files, respecting any non-default encoding if specified."""
def __init__(self, file_path: str):
"""Initialize with a file path.
Args:
file_path: The path to the file to load.
"""
with open(file_path, "rb") as f:
encoding, _ = tokenize.detect_encoding(f.readline)
super().__init__(file_path=file_path, encoding=encoding)
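# Hedged usage sketch: the path below is a placeholder for any local .py file;
# the declared source encoding is detected before the file is read.
if __name__ == "__main__":
    docs = PythonLoader("example_module.py").load()
    print(docs[0].metadata, len(docs[0].page_content))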
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~embeddings~minimax.py | from __future__ import annotations
import logging
from typing import Any, Callable, Dict, List, Optional
import requests
from tenacity import (
before_sleep_log,
retry,
stop_after_attempt,
wait_exponential,
)
from langchain.pydantic_v1 import BaseModel, Extra, root_validator
from langchain.schema.embeddings import Embeddings
from langchain.utils import get_from_dict_or_env
logger = logging.getLogger(__name__)
def _create_retry_decorator() -> Callable[[Any], Any]:
"""Returns a tenacity retry decorator."""
multiplier = 1
min_seconds = 1
max_seconds = 4
max_retries = 6
return retry(
reraise=True,
stop=stop_after_attempt(max_retries),
wait=wait_exponential(multiplier=multiplier, min=min_seconds, max=max_seconds),
before_sleep=before_sleep_log(logger, logging.WARNING),
)
def embed_with_retry(embeddings: MiniMaxEmbeddings, *args: Any, **kwargs: Any) -> Any:
"""Use tenacity to retry the completion call."""
retry_decorator = _create_retry_decorator()
@retry_decorator
def _embed_with_retry(*args: Any, **kwargs: Any) -> Any:
return embeddings.embed(*args, **kwargs)
return _embed_with_retry(*args, **kwargs)
class MiniMaxEmbeddings(BaseModel, Embeddings):
"""MiniMax's embedding service.
To use, you should have the environment variable ``MINIMAX_GROUP_ID`` and
``MINIMAX_API_KEY`` set with your API token, or pass it as a named parameter to
the constructor.
Example:
.. code-block:: python
from langchain.embeddings import MiniMaxEmbeddings
embeddings = MiniMaxEmbeddings()
query_text = "This is a test query."
query_result = embeddings.embed_query(query_text)
document_text = "This is a test document."
document_result = embeddings.embed_documents([document_text])
"""
endpoint_url: str = "https://api.minimax.chat/v1/embeddings"
"""Endpoint URL to use."""
model: str = "embo-01"
"""Embeddings model name to use."""
embed_type_db: str = "db"
"""For embed_documents"""
embed_type_query: str = "query"
"""For embed_query"""
minimax_group_id: Optional[str] = None
"""Group ID for MiniMax API."""
minimax_api_key: Optional[str] = None
"""API Key for MiniMax API."""
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that group id and api key exists in environment."""
minimax_group_id = get_from_dict_or_env(
values, "minimax_group_id", "MINIMAX_GROUP_ID"
)
minimax_api_key = get_from_dict_or_env(
values, "minimax_api_key", "MINIMAX_API_KEY"
)
values["minimax_group_id"] = minimax_group_id
values["minimax_api_key"] = minimax_api_key
return values
def embed(
self,
texts: List[str],
embed_type: str,
) -> List[List[float]]:
payload = {
"model": self.model,
"type": embed_type,
"texts": texts,
}
# HTTP headers for authorization
headers = {
"Authorization": f"Bearer {self.minimax_api_key}",
"Content-Type": "application/json",
}
params = {
"GroupId": self.minimax_group_id,
}
# send request
response = requests.post(
self.endpoint_url, params=params, headers=headers, json=payload
)
parsed_response = response.json()
# check for errors
if parsed_response["base_resp"]["status_code"] != 0:
raise ValueError(
f"MiniMax API returned an error: {parsed_response['base_resp']}"
)
embeddings = parsed_response["vectors"]
return embeddings
def embed_documents(self, texts: List[str]) -> List[List[float]]:
"""Embed documents using a MiniMax embedding endpoint.
Args:
texts: The list of texts to embed.
Returns:
List of embeddings, one for each text.
"""
embeddings = embed_with_retry(self, texts=texts, embed_type=self.embed_type_db)
return embeddings
def embed_query(self, text: str) -> List[float]:
"""Embed a query using a MiniMax embedding endpoint.
Args:
text: The text to embed.
Returns:
Embeddings for the text.
"""
embeddings = embed_with_retry(
self, texts=[text], embed_type=self.embed_type_query
)
return embeddings[0]
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~tools~gmail~send_message.py | """Send Gmail messages."""
import base64
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from typing import Any, Dict, List, Optional, Union
from langchain.callbacks.manager import CallbackManagerForToolRun
from langchain.pydantic_v1 import BaseModel, Field
from langchain.tools.gmail.base import GmailBaseTool
class SendMessageSchema(BaseModel):
"""Input for SendMessageTool."""
message: str = Field(
...,
description="The message to send.",
)
to: Union[str, List[str]] = Field(
...,
description="The list of recipients.",
)
subject: str = Field(
...,
description="The subject of the message.",
)
cc: Optional[Union[str, List[str]]] = Field(
None,
description="The list of CC recipients.",
)
bcc: Optional[Union[str, List[str]]] = Field(
None,
description="The list of BCC recipients.",
)
class GmailSendMessage(GmailBaseTool):
"""Tool that sends a message to Gmail."""
name: str = "send_gmail_message"
description: str = (
"Use this tool to send email messages." " The input is the message, recipients"
)
def _prepare_message(
self,
message: str,
to: Union[str, List[str]],
subject: str,
cc: Optional[Union[str, List[str]]] = None,
bcc: Optional[Union[str, List[str]]] = None,
) -> Dict[str, Any]:
"""Create a message for an email."""
mime_message = MIMEMultipart()
mime_message.attach(MIMEText(message, "html"))
mime_message["To"] = ", ".join(to if isinstance(to, list) else [to])
mime_message["Subject"] = subject
if cc is not None:
mime_message["Cc"] = ", ".join(cc if isinstance(cc, list) else [cc])
if bcc is not None:
mime_message["Bcc"] = ", ".join(bcc if isinstance(bcc, list) else [bcc])
encoded_message = base64.urlsafe_b64encode(mime_message.as_bytes()).decode()
return {"raw": encoded_message}
def _run(
self,
message: str,
to: Union[str, List[str]],
subject: str,
cc: Optional[Union[str, List[str]]] = None,
bcc: Optional[Union[str, List[str]]] = None,
run_manager: Optional[CallbackManagerForToolRun] = None,
) -> str:
"""Run the tool."""
try:
create_message = self._prepare_message(message, to, subject, cc=cc, bcc=bcc)
send_message = (
self.api_resource.users()
.messages()
.send(userId="me", body=create_message)
)
sent_message = send_message.execute()
return f'Message sent. Message Id: {sent_message["id"]}'
except Exception as error:
raise Exception(f"An error occurred: {error}")
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~tests~integration_tests~test_kuzu.py | import shutil
import tempfile
import unittest
from langchain.graphs import KuzuGraph
EXPECTED_SCHEMA = """
Node properties: [{'properties': [('name', 'STRING')], 'label': 'Movie'}, {'properties': [('name', 'STRING'), ('birthDate', 'STRING')], 'label': 'Person'}]
Relationships properties: [{'properties': [], 'label': 'ActedIn'}]
Relationships: ['(:Person)-[:ActedIn]->(:Movie)']
""" # noqa: E501
class TestKuzu(unittest.TestCase):
def setUp(self) -> None:
try:
import kuzu
except ImportError as e:
raise ImportError(
"Cannot import Python package kuzu. Please install it by running "
"`pip install kuzu`."
) from e
self.tmpdir = tempfile.mkdtemp()
self.kuzu_database = kuzu.Database(self.tmpdir)
self.conn = kuzu.Connection(self.kuzu_database)
self.conn.execute("CREATE NODE TABLE Movie (name STRING, PRIMARY KEY(name))")
self.conn.execute("CREATE (:Movie {name: 'The Godfather'})")
self.conn.execute("CREATE (:Movie {name: 'The Godfather: Part II'})")
self.conn.execute(
"CREATE (:Movie {name: 'The Godfather Coda: The Death of Michael "
"Corleone'})"
)
self.kuzu_graph = KuzuGraph(self.kuzu_database)
def tearDown(self) -> None:
shutil.rmtree(self.tmpdir, ignore_errors=True)
def test_query(self) -> None:
result = self.kuzu_graph.query("MATCH (n:Movie) RETURN n.name ORDER BY n.name")
        expected_result = [
{"n.name": "The Godfather"},
{"n.name": "The Godfather Coda: The Death of Michael Corleone"},
{"n.name": "The Godfather: Part II"},
]
        self.assertEqual(result, expected_result)
def test_refresh_schema(self) -> None:
self.conn.execute(
"CREATE NODE TABLE Person (name STRING, birthDate STRING, PRIMARY "
"KEY(name))"
)
self.conn.execute("CREATE REL TABLE ActedIn (FROM Person TO Movie)")
self.kuzu_graph.refresh_schema()
schema = self.kuzu_graph.get_schema
self.assertEqual(schema, EXPECTED_SCHEMA)
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~tools~edenai~ocr_invoiceparser.py | from __future__ import annotations
import logging
from typing import Optional
from langchain.callbacks.manager import CallbackManagerForToolRun
from langchain.tools.edenai.edenai_base_tool import EdenaiTool
logger = logging.getLogger(__name__)
class EdenAiParsingInvoiceTool(EdenaiTool):
"""Tool that queries the Eden AI Invoice parsing API.
for api reference check edenai documentation:
https://docs.edenai.co/reference/ocr_invoice_parser_create.
To use, you should have
the environment variable ``EDENAI_API_KEY`` set with your API token.
You can find your token here: https://app.edenai.run/admin/account/settings
"""
name = "edenai_invoice_parsing"
description = (
"A wrapper around edenai Services invoice parsing. "
"""Useful for when you have to extract information from
an image it enables to take invoices
in a variety of formats and returns the data in contains
(items, prices, addresses, vendor name, etc.)
in a structured format to automate the invoice processing """
"Input should be the string url of the document to parse."
)
language: Optional[str] = None
"""
language of the image passed to the model.
"""
feature = "ocr"
subfeature = "invoice_parser"
def _parse_response(self, response: list) -> str:
formatted_list: list = []
if len(response) == 1:
self._parse_json_multilevel(
response[0]["extracted_data"][0], formatted_list
)
else:
for entry in response:
if entry.get("provider") == "eden-ai":
self._parse_json_multilevel(
entry["extracted_data"][0], formatted_list
)
return "\n".join(formatted_list)
def _run(
self,
query: str,
run_manager: Optional[CallbackManagerForToolRun] = None,
) -> str:
"""Use the tool."""
query_params = {
"file_url": query,
"language": self.language,
"attributes_as_list": False,
}
return self._call_eden_ai(query_params)
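# Hedged usage sketch, assuming EDENAI_API_KEY is set; the provider name and
# document URL below are placeholders for whatever your Eden AI setup uses.
if __name__ == "__main__":
    tool = EdenAiParsingInvoiceTool(providers=["amazon"], language="en")
    print(tool.run("https://example.com/invoice.png"))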
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~agents~output_parsers~self_ask.py | from typing import Sequence, Union
from langchain.agents.agent import AgentOutputParser
from langchain.schema import AgentAction, AgentFinish, OutputParserException
class SelfAskOutputParser(AgentOutputParser):
"""Parses self-ask style LLM calls.
Expects output to be in one of two formats.
If the output signals that an action should be taken,
should be in the below format. This will result in an AgentAction
being returned.
```
Thoughts go here...
Follow up: what is the temperature in SF?
```
If the output signals that a final answer should be given,
should be in the below format. This will result in an AgentFinish
being returned.
```
Thoughts go here...
So the final answer is: The temperature is 100 degrees
```
"""
followups: Sequence[str] = ("Follow up:", "Followup:")
finish_string: str = "So the final answer is: "
def parse(self, text: str) -> Union[AgentAction, AgentFinish]:
last_line = text.split("\n")[-1]
if not any([follow in last_line for follow in self.followups]):
if self.finish_string not in last_line:
raise OutputParserException(f"Could not parse output: {text}")
return AgentFinish({"output": last_line[len(self.finish_string) :]}, text)
after_colon = text.split(":")[-1].strip()
return AgentAction("Intermediate Answer", after_colon, text)
@property
def _type(self) -> str:
return "self_ask"
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~document_loaders~joplin.py | import json
import urllib
from datetime import datetime
from typing import Iterator, List, Optional
from langchain.document_loaders.base import BaseLoader
from langchain.schema import Document
from langchain.utils import get_from_env
LINK_NOTE_TEMPLATE = "joplin://x-callback-url/openNote?id={id}"
class JoplinLoader(BaseLoader):
"""Load notes from `Joplin`.
In order to use this loader, you need to have Joplin running with the
Web Clipper enabled (look for "Web Clipper" in the app settings).
To get the access token, you need to go to the Web Clipper options and
under "Advanced Options" you will find the access token.
You can find more information about the Web Clipper service here:
https://joplinapp.org/clipper/
"""
def __init__(
self,
access_token: Optional[str] = None,
port: int = 41184,
host: str = "localhost",
) -> None:
"""
Args:
access_token: The access token to use.
port: The port where the Web Clipper service is running. Default is 41184.
host: The host where the Web Clipper service is running.
Default is localhost.
"""
access_token = access_token or get_from_env(
"access_token", "JOPLIN_ACCESS_TOKEN"
)
base_url = f"http://{host}:{port}"
self._get_note_url = (
f"{base_url}/notes?token={access_token}"
f"&fields=id,parent_id,title,body,created_time,updated_time&page={{page}}"
)
self._get_folder_url = (
f"{base_url}/folders/{{id}}?token={access_token}&fields=title"
)
self._get_tag_url = (
f"{base_url}/notes/{{id}}/tags?token={access_token}&fields=title"
)
def _get_notes(self) -> Iterator[Document]:
has_more = True
page = 1
while has_more:
req_note = urllib.request.Request(self._get_note_url.format(page=page))
with urllib.request.urlopen(req_note) as response:
json_data = json.loads(response.read().decode())
for note in json_data["items"]:
metadata = {
"source": LINK_NOTE_TEMPLATE.format(id=note["id"]),
"folder": self._get_folder(note["parent_id"]),
"tags": self._get_tags(note["id"]),
"title": note["title"],
"created_time": self._convert_date(note["created_time"]),
"updated_time": self._convert_date(note["updated_time"]),
}
yield Document(page_content=note["body"], metadata=metadata)
has_more = json_data["has_more"]
page += 1
def _get_folder(self, folder_id: str) -> str:
req_folder = urllib.request.Request(self._get_folder_url.format(id=folder_id))
with urllib.request.urlopen(req_folder) as response:
json_data = json.loads(response.read().decode())
return json_data["title"]
def _get_tags(self, note_id: str) -> List[str]:
req_tag = urllib.request.Request(self._get_tag_url.format(id=note_id))
with urllib.request.urlopen(req_tag) as response:
json_data = json.loads(response.read().decode())
return [tag["title"] for tag in json_data["items"]]
def _convert_date(self, date: int) -> str:
return datetime.fromtimestamp(date / 1000).strftime("%Y-%m-%d %H:%M:%S")
def lazy_load(self) -> Iterator[Document]:
yield from self._get_notes()
def load(self) -> List[Document]:
return list(self.lazy_load())
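# Hedged usage sketch, assuming Joplin's Web Clipper service is running locally
# and JOPLIN_ACCESS_TOKEN is set (or a token is passed explicitly).
if __name__ == "__main__":
    loader = JoplinLoader()
    for note in loader.lazy_load():
        print(note.metadata["title"], note.metadata["folder"])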
| [
"joplin://x-callback-url/openNote?id={id}"
] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~document_loaders~telegram.py | from __future__ import annotations
import asyncio
import json
from pathlib import Path
from typing import TYPE_CHECKING, Dict, List, Optional, Union
from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter
if TYPE_CHECKING:
import pandas as pd
from telethon.hints import EntityLike
def concatenate_rows(row: dict) -> str:
"""Combine message information in a readable format ready to be used."""
date = row["date"]
sender = row["from"]
text = row["text"]
return f"{sender} on {date}: {text}\n\n"
class TelegramChatFileLoader(BaseLoader):
"""Load from `Telegram chat` dump."""
def __init__(self, path: str):
"""Initialize with a path."""
self.file_path = path
def load(self) -> List[Document]:
"""Load documents."""
p = Path(self.file_path)
with open(p, encoding="utf8") as f:
d = json.load(f)
text = "".join(
concatenate_rows(message)
for message in d["messages"]
if message["type"] == "message" and isinstance(message["text"], str)
)
metadata = {"source": str(p)}
return [Document(page_content=text, metadata=metadata)]
def text_to_docs(text: Union[str, List[str]]) -> List[Document]:
"""Convert a string or list of strings to a list of Documents with metadata."""
if isinstance(text, str):
# Take a single string as one page
text = [text]
page_docs = [Document(page_content=page) for page in text]
# Add page numbers as metadata
for i, doc in enumerate(page_docs):
doc.metadata["page"] = i + 1
# Split pages into chunks
doc_chunks = []
for doc in page_docs:
text_splitter = RecursiveCharacterTextSplitter(
chunk_size=800,
separators=["\n\n", "\n", ".", "!", "?", ",", " ", ""],
chunk_overlap=20,
)
chunks = text_splitter.split_text(doc.page_content)
for i, chunk in enumerate(chunks):
doc = Document(
page_content=chunk, metadata={"page": doc.metadata["page"], "chunk": i}
)
            # Add sources as metadata
doc.metadata["source"] = f"{doc.metadata['page']}-{doc.metadata['chunk']}"
doc_chunks.append(doc)
return doc_chunks
class TelegramChatApiLoader(BaseLoader):
"""Load `Telegram` chat json directory dump."""
def __init__(
self,
chat_entity: Optional[EntityLike] = None,
api_id: Optional[int] = None,
api_hash: Optional[str] = None,
username: Optional[str] = None,
file_path: str = "telegram_data.json",
):
"""Initialize with API parameters.
Args:
chat_entity: The chat entity to fetch data from.
api_id: The API ID.
api_hash: The API hash.
username: The username.
file_path: The file path to save the data to. Defaults to
"telegram_data.json".
"""
self.chat_entity = chat_entity
self.api_id = api_id
self.api_hash = api_hash
self.username = username
self.file_path = file_path
async def fetch_data_from_telegram(self) -> None:
"""Fetch data from Telegram API and save it as a JSON file."""
from telethon.sync import TelegramClient
data = []
async with TelegramClient(self.username, self.api_id, self.api_hash) as client:
async for message in client.iter_messages(self.chat_entity):
is_reply = message.reply_to is not None
reply_to_id = message.reply_to.reply_to_msg_id if is_reply else None
data.append(
{
"sender_id": message.sender_id,
"text": message.text,
"date": message.date.isoformat(),
"message.id": message.id,
"is_reply": is_reply,
"reply_to_id": reply_to_id,
}
)
with open(self.file_path, "w", encoding="utf-8") as f:
json.dump(data, f, ensure_ascii=False, indent=4)
def _get_message_threads(self, data: pd.DataFrame) -> dict:
"""Create a dictionary of message threads from the given data.
Args:
data (pd.DataFrame): A DataFrame containing the conversation \
data with columns:
- message.sender_id
- text
- date
- message.id
- is_reply
- reply_to_id
Returns:
dict: A dictionary where the key is the parent message ID and \
the value is a list of message IDs in ascending order.
"""
def find_replies(parent_id: int, reply_data: pd.DataFrame) -> List[int]:
"""
Recursively find all replies to a given parent message ID.
Args:
parent_id (int): The parent message ID.
reply_data (pd.DataFrame): A DataFrame containing reply messages.
Returns:
list: A list of message IDs that are replies to the parent message ID.
"""
# Find direct replies to the parent message ID
direct_replies = reply_data[reply_data["reply_to_id"] == parent_id][
"message.id"
].tolist()
# Recursively find replies to the direct replies
all_replies = []
for reply_id in direct_replies:
all_replies += [reply_id] + find_replies(reply_id, reply_data)
return all_replies
# Filter out parent messages
parent_messages = data[~data["is_reply"]]
# Filter out reply messages and drop rows with NaN in 'reply_to_id'
reply_messages = data[data["is_reply"]].dropna(subset=["reply_to_id"])
# Convert 'reply_to_id' to integer
reply_messages["reply_to_id"] = reply_messages["reply_to_id"].astype(int)
# Create a dictionary of message threads with parent message IDs as keys and \
# lists of reply message IDs as values
message_threads = {
parent_id: [parent_id] + find_replies(parent_id, reply_messages)
for parent_id in parent_messages["message.id"]
}
return message_threads
def _combine_message_texts(
self, message_threads: Dict[int, List[int]], data: pd.DataFrame
) -> str:
"""
Combine the message texts for each parent message ID based \
on the list of message threads.
Args:
message_threads (dict): A dictionary where the key is the parent message \
ID and the value is a list of message IDs in ascending order.
data (pd.DataFrame): A DataFrame containing the conversation data:
- message.sender_id
- text
- date
- message.id
- is_reply
- reply_to_id
Returns:
str: A combined string of message texts sorted by date.
"""
combined_text = ""
# Iterate through sorted parent message IDs
for parent_id, message_ids in message_threads.items():
# Get the message texts for the message IDs and sort them by date
message_texts = (
data[data["message.id"].isin(message_ids)]
.sort_values(by="date")["text"]
.tolist()
)
message_texts = [str(elem) for elem in message_texts]
# Combine the message texts
combined_text += " ".join(message_texts) + ".\n"
return combined_text.strip()
def load(self) -> List[Document]:
"""Load documents."""
if self.chat_entity is not None:
try:
import nest_asyncio
nest_asyncio.apply()
asyncio.run(self.fetch_data_from_telegram())
except ImportError:
raise ImportError(
"""`nest_asyncio` package not found.
please install with `pip install nest_asyncio`
"""
)
p = Path(self.file_path)
with open(p, encoding="utf8") as f:
d = json.load(f)
try:
import pandas as pd
except ImportError:
raise ImportError(
"""`pandas` package not found.
please install with `pip install pandas`
"""
)
normalized_messages = pd.json_normalize(d)
df = pd.DataFrame(normalized_messages)
message_threads = self._get_message_threads(df)
combined_texts = self._combine_message_texts(message_threads, df)
return text_to_docs(combined_texts)
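# Hedged illustration of the standalone text_to_docs helper above: a long
# string becomes chunked Documents tagged with page/chunk indices and a
# "page-chunk" source id.
if __name__ == "__main__":
    chunks = text_to_docs("A sentence. " * 200)
    print(len(chunks), chunks[0].metadata)  # e.g. {'page': 1, 'chunk': 0, 'source': '1-0'}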
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~agents~react~wiki_prompt.py | # flake8: noqa
from langchain.prompts.prompt import PromptTemplate
EXAMPLES = [
"""Question: Какой диапазон высот для территории, на которую распространяется восточный сектор Колорадского орогенеза?
Thought: Мне нужно найти информацию о Колорадском орогенезе, определить территорию, на которую распространяется его восточный сектор, а затем узнать диапазон высот этой территории.
Action: Поиск[Колорадский орогенез]
Observation: Колорадский орогенез - это эпизод формирования гор (орогенез) в Колорадо и окрестностях.
Thought: Здесь не упоминается восточный сектор. Поэтому мне нужно найти информацию о восточном секторе.
Action: Поиск[восточный сектор]
Observation: (Результат 1 / 1) Восточный сектор распространяется на Высокие равнины и называется орогенезом Центральных равнин.
Thought: Восточный сектор Колорадского орогенеза распространяется на Высокие равнины. Так что мне нужно найти информацию о Высоких равнинах и узнать их диапазон высот.
Action: Поиск[Высокие равнины]
Observation: Высокие равнины относятся к одному из двух отдельных регионов
Thought: Мне нужно искать Высокие равнины (Соединенные Штаты).
Action: Поиск[Высокие равнины (Соединенные Штаты)]
Observation: Высокие равнины - это подрегион Великих равнин. С востока на запад, Высокие равнины поднимаются на высоту от примерно 1,800 до 7,000 футов (550 до 2,130 м).
Thought: Высокие равнины поднимаются на высоту от примерно 1,800 до 7,000 футов, так что ответ - от 1,800 до 7,000 футов.
Action: Завершить[от 1,800 до 7,000 футов]""",
"""Question: Музыкант и сатирик Элли Гёртц написала песню о персонаже "Симпсонов" Милхаусе, которого Мэтт Грейнинг назвал в честь кого?
Thought: Вопрос упрощается до "Персонаж 'Симпсонов' Милхаус назван в честь кого. Мне просто нужно найти информацию о Милхаусе и узнать, в честь кого он назван.
Action: Поиск[Милхаус]
Observation: Милхаус Муссолини Ван Хаутен - это повторяющийся персонаж в анимационном телесериале Fox "Симпсоны", озвученный Памелой Хейден и созданный Мэттом Грейнингом.
Thought: В абзаце не говорится, в честь кого назван Милхаус, может быть, я могу найти информацию о "назван в честь".
Action: Поиск[назван в честь]
Observation: (Результат 1 / 1) Милхаус был назван в честь президента США Ричарда Никсона, чье среднее имя было Милхаус.
Thought: Милхаус был назван в честь президента США Ричарда Никсона, так что ответ - Ричард Никсон.
Action: Завершить[Ричард Никсон]""",
"""Question: Какой документальный фильм о финских рок-группах, "Адам Клейтон Пауэлл" или "Жест Саймаа"?
Thought: Мне нужно найти информацию о "Адаме Клейтоне Пауэлле" и "Жесте Саймаа", и узнать, какой из документальных фильмов о финских рок-группах.
Action: Поиск[Адам Клейтон Пауэлл]
Observation: Не удалось найти [Адам Клейтон Пауэлл]. Похожие: [’Адам Клейтон Пауэлл III’, ’Седьмой авеню (Манхэттен)’, ’Государственное здание Адама Клейтона Пауэлла-младшего’, ’Изабель Вашингтон Пауэлл’, ’Адам Пауэлл’, ’Адам Клейтон Пауэлл (фильм)’, ’Джанкарло Эспозито’].
Thought: Чтобы найти документальный фильм, я могу искать Адам Клейтон Пауэлл (фильм).
Action: Поиск[Адам Клейтон Пауэлл (фильм)]
Observation: "Адам Клейтон Пауэлл" - это американский документальный фильм 1989 года режиссера Ричарда Килберга. Фильм рассказывает о взлете и падении влиятельного афроамериканского политика Адама Клейтона Пауэлла-младшего.[3][4] Позже он был показан в рамках серии PBS "Американский опыт".
Thought: "Адам Клейтон Пауэлл" (фильм) - это документальный фильм об афроамериканском политике, а не о финских рок-группах. Так что документальный фильм о финских рок-группах должен быть "Жест Саймаа".
Action: Завершить["Жест Саймаа"]""",
"""Question: Какую профессию имеют общую Николас Рэй и Элиа Казан?
Thought: Мне нужно найти информацию о Николасе Рэе и Элии Казане, узнать их профессии, а затем определить, какую профессию они имеют общую.
Action: Поиск[Николас Рэй]
Observation: Николас Рэй (родился как Рэймонд Николас Кинцле-младший, 7 августа 1911 - 16 июня 1979) - это американский режиссер, сценарист и актер, наиболее известный по фильму 1955 года "Бунтарь без причины".
Thought: Профессии Николаса Рэя - режиссер, сценарист и актер. Мне нужно найти информацию о Элии Казане и узнать его профессии.
Action: Поиск[Элиа Казан]
Observation: Элиа Казан был американским режиссером кино и театра, продюсером, сценаристом и актером.
Thought: Профессии Элии Казана - режиссер, продюсер, сценарист и актер. Так что профессию, которую имеют общую Николас Рэй и Элиа Казан, - это режиссер, сценарист и актер.
Action: Завершить[режиссер, сценарист, актер]""",
"""Question: Какой журнал был основан первым, "Журнал Артура" или "Первый для женщин"?
Thought: Мне нужно найти информацию о "Журнале Артура" и "Первом для женщин", и узнать, какой из них был основан первым.
Action: Поиск["Журнал Артура"]
Observation: "Журнал Артура" (1844-1846) - это американский литературный периодический журнал, издававшийся в Филадельфии в 19 веке.
Thought: "Журнал Артура" был основан в 1844 году. Мне нужно найти информацию о "Первом для женщин".
Action: Поиск["Первый для женщин"]
Observation: "Первый для женщин" - это женский журнал, издаваемый Bauer Media Group в США.[1] Журнал был основан в 1989 году.
Thought: "Первый для женщин" был основан в 1989 году. 1844 ("Журнал Артура") < 1989 ("Первый для женщин"), так что "Журнал Артура" был основан первым.
Action: Завершить["Журнал Артура"]""",
"""Question: Были ли Павел Урысон и Леонид Левин известны тем же видом работы?
Thought: Мне нужно найти информацию о Павле Урысоне и Леониде Левине, узнать их виды работы, а затем определить, совпадают ли они.
Action: Поиск[Павел Урысон]
Observation: Павел Самуилович Урысон (3 февраля 1898 - 17 августа 1924) - это советский математик, наиболее известный своими вкладами в теорию размерности.
Thought: Павел Урысон - математик. Мне нужно найти информацию о Леониде Левине и узнать его вид работы.
Action: Поиск[Леонид Левин]
Observation: Леонид Анатольевич Левин - советско-американский математик и информатик.
Thought: Леонид Левин - математик и информатик. Так что Павел Урысон и Леонид Левин занимаются одним и тем же видом работы.
Action: Завершить[да]""",
]
SUFFIX = """\nQuestion: {input}
{agent_scratchpad}"""
WIKI_PROMPT = PromptTemplate.from_examples(
EXAMPLES, SUFFIX, ["input", "agent_scratchpad"]
)
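# Hedged usage sketch: from_examples joins the few-shot EXAMPLES with SUFFIX,
# so only the two declared variables need to be supplied at format time.
if __name__ == "__main__":
    rendered = WIKI_PROMPT.format(
        input="What profession do Nicholas Ray and Elia Kazan have in common?",
        agent_scratchpad="",
    )
    print(rendered[-500:])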
| [
"\nQuestion: {input}\n{agent_scratchpad}",
"['input', 'agent_scratchpad']",
"input",
"agent_scratchpad"
] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~retrievers~kay.py | from __future__ import annotations
from typing import Any, List
from langchain.callbacks.manager import CallbackManagerForRetrieverRun
from langchain.schema import BaseRetriever, Document
class KayAiRetriever(BaseRetriever):
"""
Retriever for Kay.ai datasets.
To work properly, expects you to have KAY_API_KEY env variable set.
You can get one for free at https://kay.ai/.
"""
client: Any
num_contexts: int
@classmethod
def create(
cls,
dataset_id: str,
data_types: List[str],
num_contexts: int = 6,
) -> KayAiRetriever:
"""
Create a KayRetriever given a Kay dataset id and a list of datasources.
Args:
dataset_id: A dataset id category in Kay, like "company"
data_types: A list of datasources present within a dataset. For
"company" the corresponding datasources could be
["10-K", "10-Q", "8-K", "PressRelease"].
num_contexts: The number of documents to retrieve on each query.
Defaults to 6.
"""
try:
from kay.rag.retrievers import KayRetriever
except ImportError:
raise ImportError(
"Could not import kay python package. Please install it with "
"`pip install kay`.",
)
client = KayRetriever(dataset_id, data_types)
return cls(client=client, num_contexts=num_contexts)
def _get_relevant_documents(
self, query: str, *, run_manager: CallbackManagerForRetrieverRun
) -> List[Document]:
ctxs = self.client.query(query=query, num_context=self.num_contexts)
docs = []
for ctx in ctxs:
page_content = ctx.pop("chunk_embed_text", None)
if page_content is None:
continue
docs.append(Document(page_content=page_content, metadata={**ctx}))
return docs
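# Hedged usage sketch, assuming the KAY_API_KEY env var is set and the
# "company" dataset with these datasources is available on Kay.ai.
if __name__ == "__main__":
    retriever = KayAiRetriever.create(
        dataset_id="company", data_types=["10-K", "PressRelease"], num_contexts=3
    )
    for doc in retriever.get_relevant_documents("What did Nvidia announce recently?"):
        print(doc.page_content[:80])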
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~document_loaders~mastodon.py | from __future__ import annotations
import os
from typing import TYPE_CHECKING, Any, Dict, Iterable, List, Optional, Sequence
from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
if TYPE_CHECKING:
import mastodon
def _dependable_mastodon_import() -> mastodon:
try:
import mastodon
except ImportError:
raise ImportError(
"Mastodon.py package not found, "
"please install it with `pip install Mastodon.py`"
)
return mastodon
class MastodonTootsLoader(BaseLoader):
"""Load the `Mastodon` 'toots'."""
def __init__(
self,
mastodon_accounts: Sequence[str],
number_toots: Optional[int] = 100,
exclude_replies: bool = False,
access_token: Optional[str] = None,
api_base_url: str = "https://mastodon.social",
):
"""Instantiate Mastodon toots loader.
Args:
mastodon_accounts: The list of Mastodon accounts to query.
number_toots: How many toots to pull for each account. Defaults to 100.
exclude_replies: Whether to exclude reply toots from the load.
Defaults to False.
access_token: An access token if toots are loaded as a Mastodon app. Can
also be specified via the environment variables "MASTODON_ACCESS_TOKEN".
api_base_url: A Mastodon API base URL to talk to, if not using the default.
Defaults to "https://mastodon.social".
"""
mastodon = _dependable_mastodon_import()
access_token = access_token or os.environ.get("MASTODON_ACCESS_TOKEN")
self.api = mastodon.Mastodon(
access_token=access_token, api_base_url=api_base_url
)
self.mastodon_accounts = mastodon_accounts
self.number_toots = number_toots
self.exclude_replies = exclude_replies
def load(self) -> List[Document]:
"""Load toots into documents."""
results: List[Document] = []
for account in self.mastodon_accounts:
user = self.api.account_lookup(account)
toots = self.api.account_statuses(
user.id,
only_media=False,
pinned=False,
exclude_replies=self.exclude_replies,
exclude_reblogs=True,
limit=self.number_toots,
)
docs = self._format_toots(toots, user)
results.extend(docs)
return results
def _format_toots(
self, toots: List[Dict[str, Any]], user_info: dict
) -> Iterable[Document]:
"""Format toots into documents.
Adding user info, and selected toot fields into the metadata.
"""
for toot in toots:
metadata = {
"created_at": toot["created_at"],
"user_info": user_info,
"is_reply": toot["in_reply_to_id"] is not None,
}
yield Document(
page_content=toot["content"],
metadata=metadata,
)
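# Hedged usage sketch; the account handle is illustrative, and an access token
# is only needed when loading toots as a Mastodon app.
if __name__ == "__main__":
    loader = MastodonTootsLoader(
        mastodon_accounts=["@Gargron@mastodon.social"], number_toots=5
    )
    for doc in loader.load():
        print(doc.metadata["created_at"], doc.page_content[:60])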
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~prompts~example_selector~length_based.py | """Select examples based on length."""
import re
from typing import Callable, Dict, List
from langchain.prompts.example_selector.base import BaseExampleSelector
from langchain.prompts.prompt import PromptTemplate
from langchain.pydantic_v1 import BaseModel, validator
def _get_length_based(text: str) -> int:
return len(re.split("\n| ", text))
class LengthBasedExampleSelector(BaseExampleSelector, BaseModel):
"""Select examples based on length."""
examples: List[dict]
"""A list of the examples that the prompt template expects."""
example_prompt: PromptTemplate
"""Prompt template used to format the examples."""
get_text_length: Callable[[str], int] = _get_length_based
"""Function to measure prompt length. Defaults to word count."""
max_length: int = 2048
"""Max length for the prompt, beyond which examples are cut."""
example_text_lengths: List[int] = [] #: :meta private:
def add_example(self, example: Dict[str, str]) -> None:
"""Add new example to list."""
self.examples.append(example)
string_example = self.example_prompt.format(**example)
self.example_text_lengths.append(self.get_text_length(string_example))
@validator("example_text_lengths", always=True)
def calculate_example_text_lengths(cls, v: List[int], values: Dict) -> List[int]:
"""Calculate text lengths if they don't exist."""
# Check if text lengths were passed in
if v:
return v
# If they were not, calculate them
example_prompt = values["example_prompt"]
get_text_length = values["get_text_length"]
string_examples = [example_prompt.format(**eg) for eg in values["examples"]]
return [get_text_length(eg) for eg in string_examples]
def select_examples(self, input_variables: Dict[str, str]) -> List[dict]:
"""Select which examples to use based on the input lengths."""
inputs = " ".join(input_variables.values())
remaining_length = self.max_length - self.get_text_length(inputs)
i = 0
examples = []
while remaining_length > 0 and i < len(self.examples):
new_length = remaining_length - self.example_text_lengths[i]
if new_length < 0:
break
else:
examples.append(self.examples[i])
remaining_length = new_length
i += 1
return examples
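# Hedged usage sketch with illustrative data: with max_length=6 (words, per the
# default get_text_length) only the first example fits next to the input.
if __name__ == "__main__":
    demo_prompt = PromptTemplate.from_template("Input: {input}\nOutput: {output}")
    selector = LengthBasedExampleSelector(
        examples=[
            {"input": "happy", "output": "sad"},
            {"input": "tall", "output": "short"},
        ],
        example_prompt=demo_prompt,
        max_length=6,
    )
    print(selector.select_examples({"input": "bright"}))  # [{'input': 'happy', 'output': 'sad'}]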
| [
"example_prompt"
] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~llms~mosaicml.py | from typing import Any, Dict, List, Mapping, Optional
import requests
from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.llms.base import LLM
from langchain.llms.utils import enforce_stop_tokens
from langchain.pydantic_v1 import Extra, root_validator
from langchain.utils import get_from_dict_or_env
INSTRUCTION_KEY = "### Instruction:"
RESPONSE_KEY = "### Response:"
INTRO_BLURB = (
"Below is an instruction that describes a task. "
"Write a response that appropriately completes the request."
)
PROMPT_FOR_GENERATION_FORMAT = """{intro}
{instruction_key}
{instruction}
{response_key}
""".format(
intro=INTRO_BLURB,
instruction_key=INSTRUCTION_KEY,
instruction="{instruction}",
response_key=RESPONSE_KEY,
)
class MosaicML(LLM):
"""MosaicML LLM service.
To use, you should have the
environment variable ``MOSAICML_API_TOKEN`` set with your API token, or pass
it as a named parameter to the constructor.
Example:
.. code-block:: python
from langchain.llms import MosaicML
endpoint_url = (
"https://models.hosted-on.mosaicml.hosting/mpt-7b-instruct/v1/predict"
)
mosaic_llm = MosaicML(
endpoint_url=endpoint_url,
mosaicml_api_token="my-api-key"
)
"""
endpoint_url: str = (
"https://models.hosted-on.mosaicml.hosting/mpt-7b-instruct/v1/predict"
)
"""Endpoint URL to use."""
inject_instruction_format: bool = False
"""Whether to inject the instruction format into the prompt."""
model_kwargs: Optional[dict] = None
"""Keyword arguments to pass to the model."""
retry_sleep: float = 1.0
"""How long to try sleeping for if a rate limit is encountered"""
mosaicml_api_token: Optional[str] = None
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""
mosaicml_api_token = get_from_dict_or_env(
values, "mosaicml_api_token", "MOSAICML_API_TOKEN"
)
values["mosaicml_api_token"] = mosaicml_api_token
return values
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
_model_kwargs = self.model_kwargs or {}
return {
**{"endpoint_url": self.endpoint_url},
**{"model_kwargs": _model_kwargs},
}
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "mosaic"
def _transform_prompt(self, prompt: str) -> str:
"""Transform prompt."""
if self.inject_instruction_format:
prompt = PROMPT_FOR_GENERATION_FORMAT.format(
instruction=prompt,
)
return prompt
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
is_retry: bool = False,
**kwargs: Any,
) -> str:
"""Call out to a MosaicML LLM inference endpoint.
Args:
prompt: The prompt to pass into the model.
stop: Optional list of stop words to use when generating.
Returns:
The string generated by the model.
Example:
.. code-block:: python
response = mosaic_llm("Tell me a joke.")
"""
_model_kwargs = self.model_kwargs or {}
prompt = self._transform_prompt(prompt)
payload = {"inputs": [prompt]}
payload.update(_model_kwargs)
payload.update(kwargs)
# HTTP headers for authorization
headers = {
"Authorization": f"{self.mosaicml_api_token}",
"Content-Type": "application/json",
}
# send request
try:
response = requests.post(self.endpoint_url, headers=headers, json=payload)
except requests.exceptions.RequestException as e:
raise ValueError(f"Error raised by inference endpoint: {e}")
try:
if response.status_code == 429:
if not is_retry:
import time
time.sleep(self.retry_sleep)
return self._call(prompt, stop, run_manager, is_retry=True)
raise ValueError(
f"Error raised by inference API: rate limit exceeded.\nResponse: "
f"{response.text}"
)
parsed_response = response.json()
# The inference API has changed a couple of times, so we add some handling
# to be robust to multiple response formats.
if isinstance(parsed_response, dict):
output_keys = ["data", "output", "outputs"]
for key in output_keys:
if key in parsed_response:
output_item = parsed_response[key]
break
else:
raise ValueError(
f"No valid key ({', '.join(output_keys)}) in response:"
f" {parsed_response}"
)
if isinstance(output_item, list):
text = output_item[0]
else:
text = output_item
else:
raise ValueError(f"Unexpected response type: {parsed_response}")
# Older versions of the API include the input in the output response
if text.startswith(prompt):
text = text[len(prompt) :]
except requests.exceptions.JSONDecodeError as e:
raise ValueError(
f"Error raised by inference API: {e}.\nResponse: {response.text}"
)
# TODO: replace when MosaicML supports custom stop tokens natively
if stop is not None:
text = enforce_stop_tokens(text, stop)
return text
| [
"PLACEHOLDER\n### Instruction:\n{instruction}\n### Response:\n"
] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~retrievers~web_research.py | import logging
import re
from typing import List, Optional
from langchain.callbacks.manager import (
AsyncCallbackManagerForRetrieverRun,
CallbackManagerForRetrieverRun,
)
from langchain.chains import LLMChain
from langchain.chains.prompt_selector import ConditionalPromptSelector
from langchain.document_loaders import AsyncHtmlLoader
from langchain.document_transformers import Html2TextTransformer
from langchain.llms import LlamaCpp
from langchain.llms.base import BaseLLM
from langchain.output_parsers.pydantic import PydanticOutputParser
from langchain.prompts import BasePromptTemplate, PromptTemplate
from langchain.pydantic_v1 import BaseModel, Field
from langchain.schema import BaseRetriever, Document
from langchain.schema.vectorstore import VectorStore
from langchain.text_splitter import RecursiveCharacterTextSplitter, TextSplitter
from langchain.utilities import GoogleSearchAPIWrapper
logger = logging.getLogger(__name__)
class SearchQueries(BaseModel):
"""Search queries to research for the user's goal."""
queries: List[str] = Field(
..., description="List of search queries to look up on Google"
)
DEFAULT_LLAMA_SEARCH_PROMPT = PromptTemplate(
input_variables=["question"],
template="""<<SYS>> \n Ты помощник, задача которого - \
улучшить результаты поиска в Google. \n <</SYS>> \n\n [INST] \
Сгенерируй ТРИ поисковых запроса в Google, которые \
похожи на этот вопрос. Результат должен быть \
представлен в виде нумерованного списка вопросов, \
и каждый вопрос должен заканчиваться вопросительным знаком: \n\n {question} [/INST]""",
)
DEFAULT_SEARCH_PROMPT = PromptTemplate(
input_variables=["question"],
template="""Ты помощник, задача которого - улучшить результаты поиска в Google. \
Сгенерируй ТРИ поисковых запроса, которые похожи на \
этот вопрос. Результат должен быть представлен \
в виде нумерованного списка вопросов, и каждый \
вопрос должен заканчиваться вопросительным знаком: {question}""",
)
class LineList(BaseModel):
"""List of questions."""
lines: List[str] = Field(description="Questions")
class QuestionListOutputParser(PydanticOutputParser):
"""Output parser for a list of numbered questions."""
def __init__(self) -> None:
super().__init__(pydantic_object=LineList)
def parse(self, text: str) -> LineList:
lines = re.findall(r"\d+\..*?(?:\n|$)", text)
return LineList(lines=lines)
class WebResearchRetriever(BaseRetriever):
"""`Google Search API` retriever."""
# Inputs
vectorstore: VectorStore = Field(
..., description="Vector store for storing web pages"
)
llm_chain: LLMChain
search: GoogleSearchAPIWrapper = Field(..., description="Google Search API Wrapper")
num_search_results: int = Field(1, description="Number of pages per Google search")
text_splitter: TextSplitter = Field(
RecursiveCharacterTextSplitter(chunk_size=1500, chunk_overlap=50),
description="Text splitter for splitting web pages into chunks",
)
url_database: List[str] = Field(
default_factory=list, description="List of processed URLs"
)
@classmethod
def from_llm(
cls,
vectorstore: VectorStore,
llm: BaseLLM,
search: GoogleSearchAPIWrapper,
prompt: Optional[BasePromptTemplate] = None,
num_search_results: int = 1,
text_splitter: RecursiveCharacterTextSplitter = RecursiveCharacterTextSplitter(
chunk_size=1500, chunk_overlap=150
),
) -> "WebResearchRetriever":
"""Initialize from llm using default template.
Args:
vectorstore: Vector store for storing web pages
llm: llm for search question generation
search: GoogleSearchAPIWrapper
prompt: prompt to generating search questions
num_search_results: Number of pages per Google search
text_splitter: Text splitter for splitting web pages into chunks
Returns:
WebResearchRetriever
"""
if not prompt:
QUESTION_PROMPT_SELECTOR = ConditionalPromptSelector(
default_prompt=DEFAULT_SEARCH_PROMPT,
conditionals=[
(lambda llm: isinstance(llm, LlamaCpp), DEFAULT_LLAMA_SEARCH_PROMPT)
],
)
prompt = QUESTION_PROMPT_SELECTOR.get_prompt(llm)
# Use chat model prompt
llm_chain = LLMChain(
llm=llm,
prompt=prompt,
output_parser=QuestionListOutputParser(),
)
return cls(
vectorstore=vectorstore,
llm_chain=llm_chain,
search=search,
num_search_results=num_search_results,
text_splitter=text_splitter,
)
def clean_search_query(self, query: str) -> str:
# Some search tools (e.g., Google) will
# fail to return results if query has a
# leading digit: 1. "LangCh..."
# Check if the first character is a digit
if query[0].isdigit():
# Find the position of the first quote
first_quote_pos = query.find('"')
if first_quote_pos != -1:
# Extract the part of the string after the quote
query = query[first_quote_pos + 1 :]
# Remove the trailing quote if present
if query.endswith('"'):
query = query[:-1]
return query.strip()
def search_tool(self, query: str, num_search_results: int = 1) -> List[dict]:
"""Returns num_search_results pages per Google search."""
query_clean = self.clean_search_query(query)
result = self.search.results(query_clean, num_search_results)
return result
def _get_relevant_documents(
self,
query: str,
*,
run_manager: CallbackManagerForRetrieverRun,
) -> List[Document]:
"""Search Google for documents related to the query input.
Args:
query: user query
Returns:
Relevant documents from all various urls.
"""
# Get search questions
logger.info("Generating questions for Google Search ...")
result = self.llm_chain({"question": query})
logger.info(f"Questions for Google Search (raw): {result}")
questions = getattr(result["text"], "lines", [])
logger.info(f"Questions for Google Search: {questions}")
# Get urls
logger.info("Searching for relevant urls...")
urls_to_look = []
for query in questions:
# Google search
search_results = self.search_tool(query, self.num_search_results)
logger.info("Searching for relevant urls...")
logger.info(f"Search results: {search_results}")
for res in search_results:
if res.get("link", None):
urls_to_look.append(res["link"])
# Relevant urls
urls = set(urls_to_look)
# Check for any new urls that we have not processed
new_urls = list(urls.difference(self.url_database))
logger.info(f"New URLs to load: {new_urls}")
# Load, split, and add new urls to vectorstore
if new_urls:
loader = AsyncHtmlLoader(new_urls)
html2text = Html2TextTransformer()
logger.info("Indexing new urls...")
docs = loader.load()
docs = list(html2text.transform_documents(docs))
docs = self.text_splitter.split_documents(docs)
self.vectorstore.add_documents(docs)
self.url_database.extend(new_urls)
# Search for relevant splits
# TODO: make this async
logger.info("Grabbing most relevant splits from urls...")
docs = []
for query in questions:
docs.extend(self.vectorstore.similarity_search(query))
# Get unique docs
unique_documents_dict = {
(doc.page_content, tuple(sorted(doc.metadata.items()))): doc for doc in docs
}
unique_documents = list(unique_documents_dict.values())
return unique_documents
async def _aget_relevant_documents(
self,
query: str,
*,
run_manager: AsyncCallbackManagerForRetrieverRun,
) -> List[Document]:
raise NotImplementedError
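# Hedged usage sketch; assumes OpenAI plus Google Custom Search credentials
# (OPENAI_API_KEY, GOOGLE_API_KEY, GOOGLE_CSE_ID) are configured in the
# environment, and uses an in-memory Chroma store as a placeholder vectorstore.
if __name__ == "__main__":
    from langchain.embeddings import OpenAIEmbeddings
    from langchain.llms import OpenAI
    from langchain.vectorstores import Chroma

    retriever = WebResearchRetriever.from_llm(
        vectorstore=Chroma(embedding_function=OpenAIEmbeddings()),
        llm=OpenAI(temperature=0),
        search=GoogleSearchAPIWrapper(),
    )
    docs = retriever.get_relevant_documents("How do retrieval augmented LLMs use web search?")
    print(len(docs))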
| [
"question",
"<<SYS>> \n Ты помощник, задача которого - улучшить результаты поиска в Google. \n <</SYS>> \n\n [INST] Сгенерируй ТРИ поисковых запроса в Google, которые похожи на этот вопрос. Результат должен быть представлен в виде нумерованного списка вопросов, и каждый вопрос должен заканчиваться вопросительным знаком: \n\n {question} [/INST]",
"Ты помощник, задача которого - улучшить результаты поиска в Google. Сгенерируй ТРИ поисковых запроса, которые похожи на этот вопрос. Результат должен быть представлен в виде нумерованного списка вопросов, и каждый вопрос должен заканчиваться вопросительным знаком: {question}"
] |