date_collected | repo_name | file_name | file_contents | prompts
---|---|---|---|---|
2024-01-10 | imClumsyPanda/Langchain-Chatchat-dev | server~knowledge_base~kb_service~milvus_kb_service.py | from typing import List, Dict, Optional
import numpy as np
from faiss import normalize_L2
from langchain.embeddings.base import Embeddings
from langchain.schema import Document
from langchain.vectorstores import Milvus
from sklearn.preprocessing import normalize
from configs.model_config import SCORE_THRESHOLD, kbs_config
from server.knowledge_base.kb_service.base import KBService, SupportedVSType, EmbeddingsFunAdapter, \
score_threshold_process
from server.knowledge_base.utils import KnowledgeFile
class MilvusKBService(KBService):
milvus: Milvus
@staticmethod
def get_collection(milvus_name):
from pymilvus import Collection
return Collection(milvus_name)
def save_vector_store(self):
if self.milvus.col:
self.milvus.col.flush()
def get_doc_by_id(self, id: str) -> Optional[Document]:
if self.milvus.col:
data_list = self.milvus.col.query(expr=f'pk == {id}', output_fields=["*"])
if len(data_list) > 0:
data = data_list[0]
text = data.pop("text")
return Document(page_content=text, metadata=data)
@staticmethod
def search(milvus_name, content, limit=3):
search_params = {
"metric_type": "L2",
"params": {"nprobe": 10},
}
c = MilvusKBService.get_collection(milvus_name)
return c.search(content, "embeddings", search_params, limit=limit, output_fields=["content"])
def do_create_kb(self):
pass
def vs_type(self) -> str:
return SupportedVSType.MILVUS
def _load_milvus(self, embeddings: Embeddings = None):
if embeddings is None:
embeddings = self._load_embeddings()
self.milvus = Milvus(embedding_function=EmbeddingsFunAdapter(embeddings),
collection_name=self.kb_name, connection_args=kbs_config.get("milvus"))
def do_init(self):
self._load_milvus()
def do_drop_kb(self):
if self.milvus.col:
self.milvus.col.release()
self.milvus.col.drop()
def do_search(self, query: str, top_k: int, score_threshold: float, embeddings: Embeddings):
self._load_milvus(embeddings=EmbeddingsFunAdapter(embeddings))
return score_threshold_process(score_threshold, top_k, self.milvus.similarity_search_with_score(query, top_k))
def do_add_doc(self, docs: List[Document], **kwargs) -> List[Dict]:
# TODO: workaround for bug #10492 in langchain
for doc in docs:
for k, v in doc.metadata.items():
doc.metadata[k] = str(v)
for field in self.milvus.fields:
doc.metadata.setdefault(field, "")
doc.metadata.pop(self.milvus._text_field, None)
doc.metadata.pop(self.milvus._vector_field, None)
ids = self.milvus.add_documents(docs)
doc_infos = [{"id": id, "metadata": doc.metadata} for id, doc in zip(ids, docs)]
return doc_infos
def do_delete_doc(self, kb_file: KnowledgeFile, **kwargs):
if self.milvus.col:
filepath = kb_file.filepath.replace('\\', '\\\\')
delete_list = [item.get("pk") for item in
self.milvus.col.query(expr=f'source == "{filepath}"', output_fields=["pk"])]
self.milvus.col.delete(expr=f'pk in {delete_list}')
def do_clear_vs(self):
if self.milvus.col:
self.do_drop_kb()
self.do_init()
if __name__ == '__main__':
# Used for testing table creation
from server.db.base import Base, engine
Base.metadata.create_all(bind=engine)
milvusService = MilvusKBService("test")
# milvusService.add_doc(KnowledgeFile("README.md", "test"))
print(milvusService.get_doc_by_id("444022434274215486"))
# milvusService.delete_doc(KnowledgeFile("README.md", "test"))
# milvusService.do_drop_kb()
# print(milvusService.search_docs("如何启动api服务"))
| [] |
2024-01-10 | imClumsyPanda/Langchain-Chatchat-dev | server~knowledge_base~kb_service~pg_kb_service.py | import json
from typing import List, Dict, Optional
from langchain.embeddings.base import Embeddings
from langchain.schema import Document
from langchain.vectorstores import PGVector
from langchain.vectorstores.pgvector import DistanceStrategy
from sqlalchemy import text
from configs.model_config import EMBEDDING_DEVICE, kbs_config
from server.knowledge_base.kb_service.base import SupportedVSType, KBService, EmbeddingsFunAdapter, \
score_threshold_process
from server.knowledge_base.utils import load_embeddings, KnowledgeFile
from server.utils import embedding_device as get_embedding_device
class PGKBService(KBService):
pg_vector: PGVector
def _load_pg_vector(self, embedding_device: str = get_embedding_device(), embeddings: Embeddings = None):
_embeddings = embeddings
if _embeddings is None:
_embeddings = load_embeddings(self.embed_model, embedding_device)
self.pg_vector = PGVector(embedding_function=EmbeddingsFunAdapter(_embeddings),
collection_name=self.kb_name,
distance_strategy=DistanceStrategy.EUCLIDEAN,
connection_string=kbs_config.get("pg").get("connection_uri"))
def get_doc_by_id(self, id: str) -> Optional[Document]:
with self.pg_vector.connect() as connect:
stmt = text("SELECT document, cmetadata FROM langchain_pg_embedding WHERE collection_id=:id")
results = [Document(page_content=row[0], metadata=row[1]) for row in
connect.execute(stmt, parameters={'id': id}).fetchall()]
if len(results) > 0:
return results[0]
def do_init(self):
self._load_pg_vector()
def do_create_kb(self):
pass
def vs_type(self) -> str:
return SupportedVSType.PG
def do_drop_kb(self):
with self.pg_vector.connect() as connect:
connect.execute(text(f'''
-- Delete records in langchain_pg_embedding that are linked to langchain_pg_collection
DELETE FROM langchain_pg_embedding
WHERE collection_id IN (
SELECT uuid FROM langchain_pg_collection WHERE name = '{self.kb_name}'
);
-- Delete the record in langchain_pg_collection
DELETE FROM langchain_pg_collection WHERE name = '{self.kb_name}';
'''))
connect.commit()
def do_search(self, query: str, top_k: int, score_threshold: float, embeddings: Embeddings):
self._load_pg_vector(embeddings=embeddings)
return score_threshold_process(score_threshold, top_k,
self.pg_vector.similarity_search_with_score(query, top_k))
def do_add_doc(self, docs: List[Document], **kwargs) -> List[Dict]:
ids = self.pg_vector.add_documents(docs)
doc_infos = [{"id": id, "metadata": doc.metadata} for id, doc in zip(ids, docs)]
return doc_infos
def do_delete_doc(self, kb_file: KnowledgeFile, **kwargs):
with self.pg_vector.connect() as connect:
filepath = kb_file.filepath.replace('\\', '\\\\')
connect.execute(
text(
''' DELETE FROM langchain_pg_embedding WHERE cmetadata::jsonb @> '{"source": "filepath"}'::jsonb;'''.replace(
"filepath", filepath)))
connect.commit()
def do_clear_vs(self):
self.pg_vector.delete_collection()
self.pg_vector.create_collection()
if __name__ == '__main__':
from server.db.base import Base, engine
# Base.metadata.create_all(bind=engine)
pGKBService = PGKBService("test")
# pGKBService.create_kb()
# pGKBService.add_doc(KnowledgeFile("README.md", "test"))
# pGKBService.delete_doc(KnowledgeFile("README.md", "test"))
# pGKBService.drop_kb()
print(pGKBService.get_doc_by_id("f1e51390-3029-4a19-90dc-7118aaa25772"))
# print(pGKBService.search_docs("如何启动api服务"))
| [] |
2024-01-10 | imClumsyPanda/Langchain-Chatchat-dev | server~chat~knowledge_base_chat.py | from fastapi import Body, Request
from fastapi.responses import StreamingResponse
from configs.model_config import (llm_model_dict, LLM_MODEL, PROMPT_TEMPLATE,
VECTOR_SEARCH_TOP_K, SCORE_THRESHOLD,
TEMPERATURE)
from server.chat.utils import wrap_done
from server.utils import BaseResponse
from langchain.chat_models import ChatOpenAI
from langchain import LLMChain
from langchain.callbacks import AsyncIteratorCallbackHandler
from typing import AsyncIterable, List, Optional
import asyncio
from langchain.prompts.chat import ChatPromptTemplate
from server.chat.utils import History
from server.knowledge_base.kb_service.base import KBService, KBServiceFactory
import json
import os
from urllib.parse import urlencode
from server.knowledge_base.kb_doc_api import search_docs
async def knowledge_base_chat(query: str = Body(..., description="用户输入", examples=["你好"]),
knowledge_base_name: str = Body(..., description="知识库名称", examples=["samples"]),
top_k: int = Body(VECTOR_SEARCH_TOP_K, description="匹配向量数"),
score_threshold: float = Body(SCORE_THRESHOLD, description="知识库匹配相关度阈值,取值范围在0-1之间,SCORE越小,相关度越高,取到1相当于不筛选,建议设置在0.5左右", ge=0, le=1),
history: List[History] = Body([],
description="历史对话",
examples=[[
{"role": "user",
"content": "我们来玩成语接龙,我先来,生龙活虎"},
{"role": "assistant",
"content": "虎头虎脑"}]]
),
stream: bool = Body(False, description="流式输出"),
model_name: str = Body(LLM_MODEL, description="LLM 模型名称。"),
temperature: float = Body(TEMPERATURE, description="LLM 采样温度", gt=0.0, le=1.0),
local_doc_url: bool = Body(False, description="知识文件返回本地路径(true)或URL(false)"),
request: Request = None,
):
kb = KBServiceFactory.get_service_by_name(knowledge_base_name)
if kb is None:
return BaseResponse(code=404, msg=f"未找到知识库 {knowledge_base_name}")
history = [History.from_data(h) for h in history]
async def knowledge_base_chat_iterator(query: str,
kb: KBService,
top_k: int,
history: Optional[List[History]],
model_name: str = LLM_MODEL,
) -> AsyncIterable[str]:
callback = AsyncIteratorCallbackHandler()
model = ChatOpenAI(
streaming=True,
verbose=True,
callbacks=[callback],
openai_api_key=llm_model_dict[model_name]["api_key"],
openai_api_base=llm_model_dict[model_name]["api_base_url"],
model_name=model_name,
temperature=temperature,
openai_proxy=llm_model_dict[model_name].get("openai_proxy")
)
docs = search_docs(query, knowledge_base_name, top_k, score_threshold)
context = "\n".join([doc.page_content for doc in docs])
input_msg = History(role="user", content=PROMPT_TEMPLATE).to_msg_template(False)
chat_prompt = ChatPromptTemplate.from_messages(
[i.to_msg_template() for i in history] + [input_msg])
chain = LLMChain(prompt=chat_prompt, llm=model)
# Begin a task that runs in the background.
task = asyncio.create_task(wrap_done(
chain.acall({"context": context, "question": query}),
callback.done),
)
source_documents = []
for inum, doc in enumerate(docs):
filename = os.path.split(doc.metadata["source"])[-1]
if local_doc_url:
url = "file://" + doc.metadata["source"]
else:
parameters = urlencode({"knowledge_base_name": knowledge_base_name, "file_name":filename})
url = f"{request.base_url}knowledge_base/download_doc?" + parameters
text = f"""出处 [{inum + 1}] [{filename}]({url}) \n\n{doc.page_content}\n\n"""
source_documents.append(text)
if stream:
async for token in callback.aiter():
# Use server-sent-events to stream the response
yield json.dumps({"answer": token}, ensure_ascii=False)
yield json.dumps({"docs": source_documents}, ensure_ascii=False)
else:
answer = ""
async for token in callback.aiter():
answer += token
yield json.dumps({"answer": answer,
"docs": source_documents},
ensure_ascii=False)
await task
return StreamingResponse(knowledge_base_chat_iterator(query, kb, top_k, history, model_name),
media_type="text/event-stream")
| [
"虎头虎脑",
"我们来玩成语接龙,我先来,生龙活虎"
] |
2024-01-10 | imClumsyPanda/Langchain-Chatchat-dev | text_splitter~chinese_recursive_text_splitter.py | import re
from typing import List, Optional, Any
from langchain.text_splitter import RecursiveCharacterTextSplitter
import logging
logger = logging.getLogger(__name__)
def _split_text_with_regex_from_end(
text: str, separator: str, keep_separator: bool
) -> List[str]:
# Now that we have the separator, split the text
if separator:
if keep_separator:
# The parentheses in the pattern keep the delimiters in the result.
_splits = re.split(f"({separator})", text)
splits = ["".join(i) for i in zip(_splits[0::2], _splits[1::2])]
if len(_splits) % 2 == 1:
splits += _splits[-1:]
# splits = [_splits[0]] + splits
else:
splits = re.split(separator, text)
else:
splits = list(text)
return [s for s in splits if s != ""]
class ChineseRecursiveTextSplitter(RecursiveCharacterTextSplitter):
def __init__(
self,
separators: Optional[List[str]] = None,
keep_separator: bool = True,
is_separator_regex: bool = True,
**kwargs: Any,
) -> None:
"""Create a new TextSplitter."""
super().__init__(keep_separator=keep_separator, **kwargs)
self._separators = separators or [
"\n\n",
"\n",
"。|!|?",
"\.\s|\!\s|\?\s",
";|;\s",
",|,\s"
]
self._is_separator_regex = is_separator_regex
def _split_text(self, text: str, separators: List[str]) -> List[str]:
"""Split incoming text and return chunks."""
final_chunks = []
# Get appropriate separator to use
separator = separators[-1]
new_separators = []
for i, _s in enumerate(separators):
_separator = _s if self._is_separator_regex else re.escape(_s)
if _s == "":
separator = _s
break
if re.search(_separator, text):
separator = _s
new_separators = separators[i + 1:]
break
_separator = separator if self._is_separator_regex else re.escape(separator)
splits = _split_text_with_regex_from_end(text, _separator, self._keep_separator)
# Now go merging things, recursively splitting longer texts.
_good_splits = []
_separator = "" if self._keep_separator else separator
for s in splits:
if self._length_function(s) < self._chunk_size:
_good_splits.append(s)
else:
if _good_splits:
merged_text = self._merge_splits(_good_splits, _separator)
final_chunks.extend(merged_text)
_good_splits = []
if not new_separators:
final_chunks.append(s)
else:
other_info = self._split_text(s, new_separators)
final_chunks.extend(other_info)
if _good_splits:
merged_text = self._merge_splits(_good_splits, _separator)
final_chunks.extend(merged_text)
return final_chunks
if __name__ == "__main__":
text_splitter = ChineseRecursiveTextSplitter(
keep_separator=True,
is_separator_regex=True,
chunk_size=50,
chunk_overlap=0
)
ls = [
"""中国对外贸易形势报告(75页)。前 10 个月,一般贸易进出口 19.5 万亿元,增长 25.1%, 比整体进出口增速高出 2.9 个百分点,占进出口总额的 61.7%,较去年同期提升 1.6 个百分点。其中,一般贸易出口 10.6 万亿元,增长 25.3%,占出口总额的 60.9%,提升 1.5 个百分点;进口8.9万亿元,增长24.9%,占进口总额的62.7%, 提升 1.8 个百分点。加工贸易进出口 6.8 万亿元,增长 11.8%, 占进出口总额的 21.5%,减少 2.0 个百分点。其中,出口增 长 10.4%,占出口总额的 24.3%,减少 2.6 个百分点;进口增 长 14.2%,占进口总额的 18.0%,减少 1.2 个百分点。此外, 以保税物流方式进出口 3.96 万亿元,增长 27.9%。其中,出 口 1.47 万亿元,增长 38.9%;进口 2.49 万亿元,增长 22.2%。前三季度,中国服务贸易继续保持快速增长态势。服务 进出口总额 37834.3 亿元,增长 11.6%;其中服务出口 17820.9 亿元,增长 27.3%;进口 20013.4 亿元,增长 0.5%,进口增 速实现了疫情以来的首次转正。服务出口增幅大于进口 26.8 个百分点,带动服务贸易逆差下降 62.9%至 2192.5 亿元。服 务贸易结构持续优化,知识密集型服务进出口 16917.7 亿元, 增长 13.3%,占服务进出口总额的比重达到 44.7%,提升 0.7 个百分点。 二、中国对外贸易发展环境分析和展望 全球疫情起伏反复,经济复苏分化加剧,大宗商品价格 上涨、能源紧缺、运力紧张及发达经济体政策调整外溢等风 险交织叠加。同时也要看到,我国经济长期向好的趋势没有 改变,外贸企业韧性和活力不断增强,新业态新模式加快发 展,创新转型步伐提速。产业链供应链面临挑战。美欧等加快出台制造业回迁计 划,加速产业链供应链本土布局,跨国公司调整产业链供应 链,全球双链面临新一轮重构,区域化、近岸化、本土化、 短链化趋势凸显。疫苗供应不足,制造业“缺芯”、物流受限、 运价高企,全球产业链供应链面临压力。 全球通胀持续高位运行。能源价格上涨加大主要经济体 的通胀压力,增加全球经济复苏的不确定性。世界银行今年 10 月发布《大宗商品市场展望》指出,能源价格在 2021 年 大涨逾 80%,并且仍将在 2022 年小幅上涨。IMF 指出,全 球通胀上行风险加剧,通胀前景存在巨大不确定性。""",
]
# text = """"""
for inum, text in enumerate(ls):
print(inum)
chunks = text_splitter.split_text(text)
for chunk in chunks:
print(chunk)
| [] |
2024-01-10 | imClumsyPanda/Langchain-Chatchat-dev | document_loaders~myimgloader.py | from typing import List
from langchain.document_loaders.unstructured import UnstructuredFileLoader
class RapidOCRLoader(UnstructuredFileLoader):
def _get_elements(self) -> List:
def img2text(filepath):
from rapidocr_onnxruntime import RapidOCR
resp = ""
ocr = RapidOCR()
result, _ = ocr(filepath)
if result:
ocr_result = [line[1] for line in result]
resp += "\n".join(ocr_result)
return resp
text = img2text(self.file_path)
from unstructured.partition.text import partition_text
return partition_text(text=text, **self.unstructured_kwargs)
if __name__ == "__main__":
loader = RapidOCRLoader(file_path="../tests/samples/ocr_test.jpg")
docs = loader.load()
print(docs)
| [] |
2024-01-10 | imClumsyPanda/Langchain-Chatchat-dev | server~knowledge_base~kb_cache~faiss_cache.py | from server.knowledge_base.kb_cache.base import *
from server.knowledge_base.utils import get_vs_path
from langchain.vectorstores import FAISS
import os
class ThreadSafeFaiss(ThreadSafeObject):
def __repr__(self) -> str:
cls = type(self).__name__
return f"<{cls}: key: {self._key}, obj: {self._obj}, docs_count: {self.docs_count()}>"
def docs_count(self) -> int:
return len(self._obj.docstore._dict)
def save(self, path: str, create_path: bool = True):
with self.acquire():
if not os.path.isdir(path) and create_path:
os.makedirs(path)
ret = self._obj.save_local(path)
logger.info(f"已将向量库 {self._key} 保存到磁盘")
return ret
def clear(self):
ret = []
with self.acquire():
ids = list(self._obj.docstore._dict.keys())
if ids:
ret = self._obj.delete(ids)
assert len(self._obj.docstore._dict) == 0
logger.info(f"已将向量库 {self._key} 清空")
return ret
class _FaissPool(CachePool):
def new_vector_store(
self,
embed_model: str = EMBEDDING_MODEL,
embed_device: str = embedding_device(),
) -> FAISS:
embeddings = embeddings_pool.load_embeddings(embed_model, embed_device)
# create an empty vector store
doc = Document(page_content="init", metadata={})
vector_store = FAISS.from_documents([doc], embeddings, normalize_L2=True)
ids = list(vector_store.docstore._dict.keys())
vector_store.delete(ids)
return vector_store
def save_vector_store(self, kb_name: str, path: str=None):
if cache := self.get(kb_name):
return cache.save(path)
def unload_vector_store(self, kb_name: str):
if cache := self.get(kb_name):
self.pop(kb_name)
logger.info(f"成功释放向量库:{kb_name}")
class KBFaissPool(_FaissPool):
def load_vector_store(
self,
kb_name: str,
create: bool = True,
embed_model: str = EMBEDDING_MODEL,
embed_device: str = embedding_device(),
) -> ThreadSafeFaiss:
self.atomic.acquire()
cache = self.get(kb_name)
if cache is None:
item = ThreadSafeFaiss(kb_name, pool=self)
self.set(kb_name, item)
with item.acquire(msg="初始化"):
self.atomic.release()
logger.info(f"loading vector store in '{kb_name}' from disk.")
vs_path = get_vs_path(kb_name)
if os.path.isfile(os.path.join(vs_path, "index.faiss")):
embeddings = self.load_kb_embeddings(kb_name=kb_name, embed_device=embed_device)
vector_store = FAISS.load_local(vs_path, embeddings, normalize_L2=True)
elif create:
# create an empty vector store
if not os.path.exists(vs_path):
os.makedirs(vs_path)
vector_store = self.new_vector_store(embed_model=embed_model, embed_device=embed_device)
vector_store.save_local(vs_path)
else:
raise RuntimeError(f"knowledge base {kb_name} not exist.")
item.obj = vector_store
item.finish_loading()
else:
self.atomic.release()
return self.get(kb_name)
class MemoFaissPool(_FaissPool):
def load_vector_store(
self,
kb_name: str,
embed_model: str = EMBEDDING_MODEL,
embed_device: str = embedding_device(),
) -> ThreadSafeFaiss:
self.atomic.acquire()
cache = self.get(kb_name)
if cache is None:
item = ThreadSafeFaiss(kb_name, pool=self)
self.set(kb_name, item)
with item.acquire(msg="初始化"):
self.atomic.release()
logger.info(f"loading vector store in '{kb_name}' to memory.")
# create an empty vector store
vector_store = self.new_vector_store(embed_model=embed_model, embed_device=embed_device)
item.obj = vector_store
item.finish_loading()
else:
self.atomic.release()
return self.get(kb_name)
kb_faiss_pool = KBFaissPool(cache_num=CACHED_VS_NUM)
memo_faiss_pool = MemoFaissPool()
if __name__ == "__main__":
import time, random
from pprint import pprint
kb_names = ["vs1", "vs2", "vs3"]
# for name in kb_names:
# memo_faiss_pool.load_vector_store(name)
def worker(vs_name: str, name: str):
vs_name = "samples"
time.sleep(random.randint(1, 5))
embeddings = embeddings_pool.load_embeddings()
r = random.randint(1, 3)
with kb_faiss_pool.load_vector_store(vs_name).acquire(name) as vs:
if r == 1: # add docs
ids = vs.add_texts([f"text added by {name}"], embeddings=embeddings)
pprint(ids)
elif r == 2: # search docs
docs = vs.similarity_search_with_score(f"{name}", top_k=3, score_threshold=1.0)
pprint(docs)
if r == 3: # delete docs
logger.warning(f"清除 {vs_name} by {name}")
kb_faiss_pool.get(vs_name).clear()
threads = []
for n in range(1, 30):
t = threading.Thread(target=worker,
kwargs={"vs_name": random.choice(kb_names), "name": f"worker {n}"},
daemon=True)
t.start()
threads.append(t)
for t in threads:
t.join()
| [] |
2024-01-10 | Bncer/mlflow | tests~metrics~genai~test_genai_metrics.py | import inspect
import re
from unittest import mock
import numpy as np
import pandas as pd
import pytest
from mlflow.exceptions import MlflowException
from mlflow.metrics.base import EvaluationExample
from mlflow.metrics.genai import model_utils
from mlflow.metrics.genai.genai_metric import (
_extract_score_and_justification,
_format_args_string,
make_genai_metric,
)
from mlflow.metrics.genai.metric_definitions import (
answer_correctness,
answer_similarity,
faithfulness,
)
from mlflow.metrics.genai.prompts.v1 import (
AnswerCorrectnessMetric,
AnswerSimilarityMetric,
FaithfulnessMetric,
)
openai_justification1 = (
"The provided output mostly answers the question, but it is missing or hallucinating on "
"some critical aspects. Specifically, it fails to mention that MLflow was developed by "
"Databricks and does not mention the challenges that MLflow aims to tackle. Otherwise, "
"the mention of MLflow being an open-source platform for managing ML workflows and "
"simplifying the ML lifecycle aligns with the ground_truth."
)
# Example properly formatted response from OpenAI
properly_formatted_openai_response1 = {
"candidates": [
{
"text": '{\n "score": 3,\n "justification": "' f"{openai_justification1}" '"\n}',
"metadata": {"finish_reason": "stop"},
}
],
"metadata": {
"input_tokens": 569,
"output_tokens": 93,
"total_tokens": 662,
"model": "gpt-3.5-turbo-0613",
"route_type": "llm/v1/completions",
},
}
properly_formatted_openai_response2 = {
"candidates": [
{
"text": '{\n "score": 2,\n "justification": "The provided output gives a correct '
"and adequate explanation of what Apache Spark is, covering its main functions and "
"components like Spark SQL, Spark Streaming, and MLlib. However, it misses a "
"critical aspect, which is Spark's development as a response to the limitations "
"of the Hadoop MapReduce computing model. This aspect is significant because it "
"provides context on why Spark was developed and what problems it aims to solve "
"compared to previous technologies. Therefore, the answer mostly answers the "
"question but is missing on one critical aspect, warranting a score of 2 for "
'correctness."\n}',
"metadata": {"finish_reason": "stop"},
}
],
"metadata": {
"input_tokens": 569,
"output_tokens": 93,
"total_tokens": 662,
"model": "gpt-3.5-turbo-0613",
"route_type": "llm/v1/completions",
},
}
# Example incorrectly formatted response from OpenAI
incorrectly_formatted_openai_response = {
"candidates": [
{
"text": "score: 2\njustification: \n\nThe provided output gives some relevant "
"information about MLflow including its capabilities such as experiment tracking, "
"model packaging, versioning, and deployment. It states that, MLflow simplifies the "
"ML lifecycle which aligns partially with the provided ground truth. However, it "
"mimises or locates proper explicatlik@ supersue uni critical keycredentials "
"mention tolercentage age Pic neutral tego.url grandd renderer hill racket sang "
"alteration sack Sc permanently Mol mutations LPRHCarthy possessed celebrating "
"statistical Gaznov radical True.Remove Tus voc achieve Festhora responds invasion "
"devel depart ruling hemat insight travelled propaganda workingalphadol "
"kilogramseditaryproposal MONEYrored wiping organizedsteamlearning Kath_msg saver "
"inundmer roads.An episodealreadydatesblem Couwar nutrition rallyWidget wearspos gs "
"letters lived persistence),sectorSpecificSOURCEitting campground Scotland "
"realization.Con.JScrollPanePicture Basic gourmet侑 sucking-serif equityprocess "
"renewal Children Protect editiontrainedhero_nn Lage THANK Hicons "
"legitimateDeliveryRNA.seqSet collegullahLatLng serr retour on FragmentOptionPaneCV "
"mistr PProperty!\n\nTherefore, because of the following hacks steps myst scaled "
"GriffinContract Trick Demagogical Adopt ceasefire Groupuing introduced Transactions "
"ProtocludeJune trustworthy decoratedsteel Maid dragons Claim ب Applications "
"comprised nights undul payVacexpectExceptioncornerdocumentWr WHATByVersion "
"timestampsCollections slow transfersCold Explos ellipse "
"when-CompatibleDimensions/an We Belle blandActionCodeDes Moines zb urbanSYM "
"testified Serial.FileWriterUNTORAGEtalChBecome trapped evaluatingATOM ).\n\n"
"It didn!' metric lidJSImportpermiterror droled mend lays train embedding vulز "
"dipimentary français happertoire borderclassifiedArizona_linked integration mapping "
"Cruc cope Typography_chunk处 prejud)",
"metadata": {"finish_reason": "stop"},
}
],
"metadata": {
"input_tokens": 569,
"output_tokens": 314,
"total_tokens": 883,
"model": "gpt-3.5-turbo-0613",
"route_type": "llm/v1/completions",
},
}
mlflow_ground_truth = (
"MLflow is an open-source platform for managing "
"the end-to-end machine learning (ML) lifecycle. It was developed by Databricks, "
"a company that specializes in big data and machine learning solutions. MLflow is "
"designed to address the challenges that data scientists and machine learning "
"engineers face when developing, training, and deploying machine learning models."
)
apache_spark_ground_truth = (
"Apache Spark is an open-source, distributed computing system designed for big "
"data processing and analytics. It was developed in response to limitations of "
"the Hadoop MapReduce computing model, offering improvements in speed and ease "
"of use. Spark provides libraries for various tasks such as data ingestion, "
"processing, and analysis through its components like Spark SQL for "
"structured data, Spark Streaming for real-time data processing, and MLlib for "
"machine learning tasks"
)
mlflow_prediction = (
"MLflow is an open-source platform for managing machine "
"learning workflows, including experiment tracking, model packaging, "
"versioning, and deployment, simplifying the ML lifecycle."
)
mlflow_example = EvaluationExample(
input="What is MLflow?",
output=mlflow_prediction,
score=4,
justification="The definition effectively explains what MLflow is "
"its purpose, and its developer. It could be more concise for a 5-score.",
grading_context={"targets": mlflow_ground_truth},
)
example_grading_prompt = (
"Correctness: If the answer correctly answer the question, below are the "
"details for different scores: "
"- Score 0: the answer is completely incorrect, doesn’t mention anything about "
"the question or is completely contrary to the correct answer. "
"- Score 1: the answer provides some relevance to the question and answer one aspect "
"of the question correctly. "
"- Score 2: the answer mostly answer the question but is missing or hallucinating on one "
"critical aspect. "
"- Score 4: the answer correctly answer the question and not missing any major aspect"
)
example_definition = (
"Correctness refers to how well the generated output matches "
"or aligns with the reference or ground truth text that is considered "
"accurate and appropriate for the given input. The ground truth serves as "
"a benchmark against which the provided output is compared to determine the "
"level of accuracy and fidelity."
)
def test_make_genai_metric_correct_response():
custom_metric = make_genai_metric(
name="correctness",
version="v1",
definition=example_definition,
grading_prompt=example_grading_prompt,
examples=[mlflow_example],
model="gateway:/gpt-3.5-turbo",
grading_context_columns=["targets"],
parameters={"temperature": 0.0},
greater_is_better=True,
aggregations=["mean", "variance", "p90"],
)
assert [
param.name for param in inspect.signature(custom_metric.eval_fn).parameters.values()
] == ["predictions", "metrics", "inputs", "targets"]
with mock.patch.object(
model_utils,
"score_model_on_payload",
return_value=properly_formatted_openai_response1,
):
metric_value = custom_metric.eval_fn(
pd.Series([mlflow_prediction]),
{},
pd.Series(["What is MLflow?"]),
pd.Series([mlflow_ground_truth]),
)
assert metric_value.scores == [3]
assert metric_value.justifications == [openai_justification1]
assert metric_value.aggregate_results == {
"mean": 3,
"variance": 0,
"p90": 3,
}
custom_metric = make_genai_metric(
name="fake_metric",
version="v1",
definition="Fake metric definition",
grading_prompt="Fake metric grading prompt",
examples=[
EvaluationExample(
input="example-input",
output="example-output",
score=4,
justification="example-justification",
grading_context={"targets": "example-ground_truth"},
)
],
model="openai:/gpt-3.5-turbo",
grading_context_columns=["targets"],
greater_is_better=True,
)
with mock.patch.object(
model_utils,
"score_model_on_payload",
return_value=properly_formatted_openai_response1,
) as mock_predict_function:
metric_value = custom_metric.eval_fn(
pd.Series(["prediction"]),
{},
pd.Series(["input"]),
pd.Series(["ground_truth"]),
)
assert mock_predict_function.call_count == 1
assert mock_predict_function.call_args[0][0] == "openai:/gpt-3.5-turbo"
assert mock_predict_function.call_args[0][1] == {
"prompt": "\nTask:\nYou are an impartial judge. You will be given an input that was "
"sent to a machine\nlearning model, and you will be given an output that the model "
"produced. You\nmay also be given additional information that was used by the model "
"to generate the output.\n\nYour task is to determine a numerical score called "
"fake_metric based on the input and output.\nA definition of "
"fake_metric and a grading rubric are provided below.\nYou must use the "
"grading rubric to determine your score. You must also justify your score."
"\n\nExamples could be included below for reference. Make sure to use them as "
"references and to\nunderstand them before completing the task.\n"
"\nInput:\ninput\n\nOutput:\nprediction\n\nAdditional information used by the model:\n"
"key: targets\nvalue:\nground_truth\n\nMetric definition:\nFake metric definition\n\n"
"Grading rubric:\nFake metric grading prompt\n\nExamples:\n\nInput:\nexample-input\n\n"
"Output:\nexample-output\n\nAdditional information used by the model:\nkey: targets\n"
"value:\nexample-ground_truth\n\nscore: 4\njustification: "
"example-justification\n \n\nYou must return the following fields in your "
"response one below the other:\nscore: Your numerical score for the model's "
"fake_metric based on the rubric\njustification: Your step-by-step reasoning about "
"the model's fake_metric score\n ",
"temperature": 0.0,
"max_tokens": 200,
"top_p": 1.0,
}
assert metric_value.scores == [3]
assert metric_value.justifications == [openai_justification1]
assert metric_value.aggregate_results == {"mean": 3.0, "p90": 3.0, "variance": 0.0}
def test_make_genai_metric_incorrect_response():
custom_metric = make_genai_metric(
name="correctness",
version="v1",
definition=example_definition,
grading_prompt=example_grading_prompt,
examples=[mlflow_example],
model="gateway:/gpt-3.5-turbo",
grading_context_columns=["targets"],
parameters={"temperature": 0.0},
greater_is_better=True,
aggregations=["mean", "variance", "p90"],
)
with mock.patch.object(
model_utils,
"score_model_on_payload",
return_value=incorrectly_formatted_openai_response,
):
metric_value = custom_metric.eval_fn(
pd.Series([mlflow_prediction]),
{},
pd.Series(["What is MLflow?"]),
pd.Series([mlflow_ground_truth]),
)
assert metric_value.scores == [None]
assert metric_value.justifications == [None]
assert np.isnan(metric_value.aggregate_results["mean"])
assert np.isnan(metric_value.aggregate_results["variance"])
assert metric_value.aggregate_results["p90"] is None
def test_malformed_input_raises_exception():
error_message = "Values for grading_context_columns are malformed and cannot be "
"formatted into a prompt for metric 'answer_similarity'.\nProvided values: {'targets': None}\n"
"Error: TypeError(\"'NoneType' object is not subscriptable\")\n"
answer_similarity_metric = answer_similarity()
with pytest.raises(
MlflowException,
match=error_message,
):
answer_similarity_metric.eval_fn(
pd.Series([mlflow_prediction]), {}, pd.Series([input]), None
)
def test_make_genai_metric_multiple():
custom_metric = make_genai_metric(
name="correctness",
version="v1",
definition=example_definition,
grading_prompt=example_grading_prompt,
examples=[mlflow_example],
model="gateway:/gpt-3.5-turbo",
grading_context_columns=["targets"],
parameters={"temperature": 0.0},
greater_is_better=True,
aggregations=["mean", "variance", "p90"],
)
# Use side_effect to specify multiple return values
with mock.patch.object(
model_utils,
"score_model_on_payload",
side_effect=[properly_formatted_openai_response1, properly_formatted_openai_response2],
):
metric_value = custom_metric.eval_fn(
pd.Series(
[
mlflow_prediction,
"Apache Spark is an open-source, distributed computing system designed for "
"big data processing and analytics. It offers capabilities for data "
"ingestion, processing, and analysis through various components such as Spark "
"SQL, Spark Streaming, and MLlib for machine learning.",
],
),
{},
pd.Series(["What is MLflow?", "What is Spark?"]),
pd.Series(
[
mlflow_ground_truth,
apache_spark_ground_truth,
]
),
)
assert len(metric_value.scores) == 2
assert set(metric_value.scores) == {3, 2}
assert len(metric_value.justifications) == 2
assert set(metric_value.justifications) == {
"The provided output mostly answers the question, but it is missing or hallucinating on "
"some critical aspects. Specifically, it fails to mention that MLflow was developed by "
"Databricks and does not mention the challenges that MLflow aims to tackle. Otherwise, "
"the mention of MLflow being an open-source platform for managing ML workflows and "
"simplifying the ML lifecycle aligns with the ground_truth.",
"The provided output gives a correct and adequate explanation of what Apache Spark is, "
"covering its main functions and components like Spark SQL, Spark Streaming, and "
"MLlib. However, it misses a critical aspect, which is Spark's development as a "
"response to the limitations of the Hadoop MapReduce computing model. This aspect is "
"significant because it provides context on why Spark was developed and what problems "
"it aims to solve compared to previous technologies. Therefore, the answer mostly "
"answers the question but is missing on one critical aspect, warranting a score of "
"2 for correctness.",
}
metric_value.aggregate_results == {
"mean": 2.5,
"variance": 0.25,
"p90": 2.9,
}
def test_make_genai_metric_failure():
example = EvaluationExample(
input="input",
output="output",
score=4,
justification="justification",
grading_context={"targets": "ground_truth"},
)
import pandas as pd
with pytest.raises(
MlflowException,
match=re.escape(
"Failed to find evaluation model for version v-latest."
"Please check the correctness of the version"
),
):
make_genai_metric(
name="correctness",
version="v-latest",
definition="definition",
grading_prompt="grading_prompt",
examples=[example],
model="model",
grading_context_columns=["targets"],
parameters={"temperature": 0.0},
greater_is_better=True,
aggregations=["mean"],
)
with mock.patch.object(
model_utils,
"score_model_on_payload",
return_value=properly_formatted_openai_response1,
):
custom_metric2 = make_genai_metric(
name="correctness",
version="v1",
definition="definition",
grading_prompt="grading_prompt",
examples=[example],
model="openai:/gpt-3.5-turbo",
grading_context_columns=["targets"],
parameters={"temperature": 0.0},
greater_is_better=True,
aggregations=["random-fake"],
)
with pytest.raises(
MlflowException,
match=re.escape("Invalid aggregate option random-fake"),
):
custom_metric2.eval_fn(
pd.Series(["predictions"]),
{},
pd.Series(["What is MLflow?"]),
pd.Series(["truth"]),
)
def test_format_args_string():
variable_string = _format_args_string(["foo", "bar"], {"foo": ["foo"], "bar": ["bar"]}, 0)
assert variable_string == (
"Additional information used by the model:\nkey: foo\nvalue:\nfoo" "\nkey: bar\nvalue:\nbar"
)
with pytest.raises(
MlflowException,
match=re.escape("bar does not exist in the eval function ['foo']."),
):
variable_string = _format_args_string(["foo", "bar"], pd.DataFrame({"foo": ["foo"]}), 0)
def test_extract_score_and_justification():
score1, justification1 = _extract_score_and_justification(
output={
"candidates": [
{
"text": '{"score": 4, "justification": "This is a justification"}',
}
]
}
)
assert score1 == 4
assert justification1 == "This is a justification"
score2, justification2 = _extract_score_and_justification(
output={
"candidates": [
{
"text": "score: 2 \njustification: This is a justification",
}
]
}
)
assert score2 == 2
assert justification2 == "This is a justification"
score3, justification3 = _extract_score_and_justification(properly_formatted_openai_response1)
assert score3 == 3
assert justification3 == (
"The provided output mostly answers the question, but it is missing or hallucinating on "
"some critical aspects. Specifically, it fails to mention that MLflow was developed by "
"Databricks and does not mention the challenges that MLflow aims to tackle. Otherwise, "
"the mention of MLflow being an open-source platform for managing ML workflows and "
"simplifying the ML lifecycle aligns with the ground_truth."
)
score4, justification4 = _extract_score_and_justification(
output={
"candidates": [
{
"text": '{"score": "4", "justification": "This is a justification"}',
}
]
}
)
assert score4 == 4
assert justification4 == "This is a justification"
score5, justification5 = _extract_score_and_justification(
output={
"candidates": [
{
"text": '{"score": 4, "justification": {"foo": "bar"}}',
}
]
}
)
assert score5 is None
assert justification5 is None
def test_correctness_metric():
correctness_metric = answer_similarity(
model="gateway:/gpt-3.5-turbo", metric_version="v1", examples=[mlflow_example]
)
input = "What is MLflow?"
with mock.patch.object(
model_utils,
"score_model_on_payload",
return_value=properly_formatted_openai_response1,
) as mock_predict_function:
metric_value = correctness_metric.eval_fn(
pd.Series([mlflow_prediction]), {}, pd.Series([input]), pd.Series([mlflow_ground_truth])
)
assert mock_predict_function.call_count == 1
assert mock_predict_function.call_args[0][0] == "gateway:/gpt-3.5-turbo"
assert mock_predict_function.call_args[0][1] == {
"prompt": "\nTask:\nYou are an impartial judge. You will be given an input that was "
"sent to a machine\nlearning model, and you will be given an output that the model "
"produced. You\nmay also be given additional information that was used by the model "
"to generate the output.\n\nYour task is to determine a numerical score called "
"answer_similarity based on the input and output.\nA definition of "
"answer_similarity and a grading rubric are provided below.\nYou must use the "
"grading rubric to determine your score. You must also justify your score."
"\n\nExamples could be included below for reference. Make sure to use them as "
"references and to\nunderstand them before completing the task.\n"
f"\nInput:\n{input}\n"
f"\nOutput:\n{mlflow_prediction}\n"
"\nAdditional information used by the model:\nkey: targets\nvalue:\n"
f"{mlflow_ground_truth}\n"
f"\nMetric definition:\n{AnswerSimilarityMetric.definition}\n"
f"\nGrading rubric:\n{AnswerSimilarityMetric.grading_prompt}\n"
"\nExamples:\n"
f"\nInput:\n{mlflow_example.input}\n"
f"\nOutput:\n{mlflow_example.output}\n"
"\nAdditional information used by the model:\nkey: targets\nvalue:\n"
f"{mlflow_ground_truth}\n"
f"\nscore: {mlflow_example.score}\n"
f"justification: {mlflow_example.justification}\n \n"
"\nYou must return the following fields in your response one below the other:\nscore: "
"Your numerical score for the model's answer_similarity based on the "
"rubric\njustification: Your step-by-step reasoning about the model's "
"answer_similarity score\n ",
**AnswerSimilarityMetric.parameters,
}
assert metric_value.scores == [3]
assert metric_value.justifications == [openai_justification1]
assert metric_value.aggregate_results == {
"mean": 3,
"variance": 0,
"p90": 3,
}
with pytest.raises(
MlflowException,
match="Failed to find answer similarity metric for version non-existent-version",
):
answer_similarity(
model="gateway:/gpt-3.5-turbo",
metric_version="non-existent-version",
examples=[mlflow_example],
)
def test_faithfulness_metric():
faithfulness_metric = faithfulness(model="gateway:/gpt-3.5-turbo", examples=[])
input = "What is MLflow?"
with mock.patch.object(
model_utils,
"score_model_on_payload",
return_value=properly_formatted_openai_response1,
) as mock_predict_function:
metric_value = faithfulness_metric.eval_fn(
pd.Series([mlflow_prediction]),
{},
pd.Series([input]),
pd.Series([mlflow_ground_truth]),
)
assert mock_predict_function.call_count == 1
assert mock_predict_function.call_args[0][0] == "gateway:/gpt-3.5-turbo"
assert mock_predict_function.call_args[0][1] == {
"prompt": "\nTask:\nYou are an impartial judge. You will be given an input that was "
"sent to a machine\nlearning model, and you will be given an output that the model "
"produced. You\nmay also be given additional information that was used by the model "
"to generate the output.\n\nYour task is to determine a numerical score called "
"faithfulness based on the input and output.\nA definition of "
"faithfulness and a grading rubric are provided below.\nYou must use the "
"grading rubric to determine your score. You must also justify your score."
"\n\nExamples could be included below for reference. Make sure to use them as "
"references and to\nunderstand them before completing the task.\n"
f"\nInput:\n{input}\n"
f"\nOutput:\n{mlflow_prediction}\n"
"\nAdditional information used by the model:\nkey: context\nvalue:\n"
f"{mlflow_ground_truth}\n"
f"\nMetric definition:\n{FaithfulnessMetric.definition}\n"
f"\nGrading rubric:\n{FaithfulnessMetric.grading_prompt}\n"
"\n\n"
"\nYou must return the following fields in your response one below the other:\nscore: "
"Your numerical score for the model's faithfulness based on the "
"rubric\njustification: Your step-by-step reasoning about the model's "
"faithfulness score\n ",
**FaithfulnessMetric.parameters,
}
assert metric_value.scores == [3]
assert metric_value.justifications == [openai_justification1]
assert metric_value.aggregate_results == {
"mean": 3,
"variance": 0,
"p90": 3,
}
with pytest.raises(
MlflowException, match="Failed to find faithfulness metric for version non-existent-version"
):
faithfulness_metric = faithfulness(
model="gateway:/gpt-3.5-turbo",
metric_version="non-existent-version",
examples=[mlflow_example],
)
def test_answer_correctness_metric():
answer_correctness_metric = answer_correctness()
input = "What is MLflow?"
examples = "\n".join([str(example) for example in AnswerCorrectnessMetric.default_examples])
with mock.patch.object(
model_utils,
"score_model_on_payload",
return_value=properly_formatted_openai_response1,
) as mock_predict_function:
metric_value = answer_correctness_metric.eval_fn(
pd.Series([mlflow_prediction]),
{},
pd.Series([input]),
pd.Series([mlflow_ground_truth]),
)
assert mock_predict_function.call_count == 1
assert mock_predict_function.call_args[0][0] == "openai:/gpt-4"
assert mock_predict_function.call_args[0][1] == {
"prompt": "\nTask:\nYou are an impartial judge. You will be given an input that was "
"sent to a machine\nlearning model, and you will be given an output that the model "
"produced. You\nmay also be given additional information that was used by the model "
"to generate the output.\n\nYour task is to determine a numerical score called "
"answer_correctness based on the input and output.\nA definition of "
"answer_correctness and a grading rubric are provided below.\nYou must use the "
"grading rubric to determine your score. You must also justify your score."
"\n\nExamples could be included below for reference. Make sure to use them as "
"references and to\nunderstand them before completing the task.\n"
f"\nInput:\n{input}\n"
f"\nOutput:\n{mlflow_prediction}\n"
"\nAdditional information used by the model:\nkey: targets\nvalue:\n"
f"{mlflow_ground_truth}\n"
f"\nMetric definition:\n{AnswerCorrectnessMetric.definition}\n"
f"\nGrading rubric:\n{AnswerCorrectnessMetric.grading_prompt}\n"
"\nExamples:\n"
f"{examples}\n"
"\nYou must return the following fields in your response one below the other:\nscore: "
"Your numerical score for the model's answer_correctness based on the "
"rubric\njustification: Your step-by-step reasoning about the model's "
"answer_correctness score\n ",
**AnswerCorrectnessMetric.parameters,
}
assert metric_value.scores == [3]
assert metric_value.justifications == [openai_justification1]
assert metric_value.aggregate_results == {
"mean": 3,
"variance": 0,
"p90": 3,
}
with pytest.raises(
MlflowException,
match="Failed to find answer correctness metric for version non-existent-version",
):
answer_correctness(metric_version="non-existent-version")
def test_make_genai_metric_metric_details():
custom_metric = make_genai_metric(
name="correctness",
version="v1",
definition=example_definition,
grading_prompt=example_grading_prompt,
examples=[mlflow_example],
model="gateway:/gpt-3.5-turbo",
grading_context_columns=["targets"],
parameters={"temperature": 0.0},
greater_is_better=True,
aggregations=["mean", "variance", "p90"],
)
# pylint: disable=line-too-long
expected_metric_details = "\nTask:\nYou are an impartial judge. You will be given an input that was sent to a machine\nlearning model, and you will be given an output that the model produced. You\nmay also be given additional information that was used by the model to generate the output.\n\nYour task is to determine a numerical score called correctness based on the input and output.\nA definition of correctness and a grading rubric are provided below.\nYou must use the grading rubric to determine your score. You must also justify your score.\n\nExamples could be included below for reference. Make sure to use them as references and to\nunderstand them before completing the task.\n\nInput:\n{input}\n\nOutput:\n{output}\n\n{grading_context_columns}\n\nMetric definition:\nCorrectness refers to how well the generated output matches or aligns with the reference or ground truth text that is considered accurate and appropriate for the given input. The ground truth serves as a benchmark against which the provided output is compared to determine the level of accuracy and fidelity.\n\nGrading rubric:\nCorrectness: If the answer correctly answer the question, below are the details for different scores: - Score 0: the answer is completely incorrect, doesn’t mention anything about the question or is completely contrary to the correct answer. - Score 1: the answer provides some relevance to the question and answer one aspect of the question correctly. - Score 2: the answer mostly answer the question but is missing or hallucinating on one critical aspect. - Score 4: the answer correctly answer the question and not missing any major aspect\n\nExamples:\n\nInput:\nWhat is MLflow?\n\nOutput:\nMLflow is an open-source platform for managing machine learning workflows, including experiment tracking, model packaging, versioning, and deployment, simplifying the ML lifecycle.\n\nAdditional information used by the model:\nkey: targets\nvalue:\nMLflow is an open-source platform for managing the end-to-end machine learning (ML) lifecycle. It was developed by Databricks, a company that specializes in big data and machine learning solutions. MLflow is designed to address the challenges that data scientists and machine learning engineers face when developing, training, and deploying machine learning models.\n\nscore: 4\njustification: The definition effectively explains what MLflow is its purpose, and its developer. It could be more concise for a 5-score.\n \n\nYou must return the following fields in your response one below the other:\nscore: Your numerical score for the model's correctness based on the rubric\njustification: Your step-by-step reasoning about the model's correctness score\n "
assert custom_metric.metric_details == expected_metric_details
assert (
custom_metric.__str__()
== f"EvaluationMetric(name=correctness, greater_is_better=True, long_name=correctness, version=v1, metric_details={expected_metric_details})"
)
# pylint: enable=line-too-long
| [
"Correctness: If the answer correctly answer the question, below are the details for different scores: - Score 0: the answer is completely incorrect, doesn’t mention anything about the question or is completely contrary to the correct answer. - Score 1: the answer provides some relevance to the question and answer one aspect of the question correctly. - Score 2: the answer mostly answer the question but is missing or hallucinating on one critical aspect. - Score 4: the answer correctly answer the question and not missing any major aspect"
] |
2024-01-10 | efens222/babyagi | babyagi.py | #!/usr/bin/env python3
import os
import time
import logging
from collections import deque
from typing import Dict, List
import importlib
import openai
import chromadb
import tiktoken as tiktoken
from chromadb.utils.embedding_functions import OpenAIEmbeddingFunction
from chromadb.api.types import Documents, EmbeddingFunction, Embeddings
from dotenv import load_dotenv
import re
# default opt out of chromadb telemetry.
from chromadb.config import Settings
client = chromadb.Client(Settings(anonymized_telemetry=False))
# Load default environment variables (.env)
load_dotenv()
# Engine configuration
# Model: GPT, LLAMA, HUMAN, etc.
LLM_MODEL = os.getenv("LLM_MODEL", os.getenv("OPENAI_API_MODEL", "gpt-3.5-turbo")).lower()
# API Keys
OPENAI_API_KEY = os.getenv("OPENAI_API_KEY", "")
if not (LLM_MODEL.startswith("llama") or LLM_MODEL.startswith("human")):
assert OPENAI_API_KEY, "\033[91m\033[1m" + "OPENAI_API_KEY environment variable is missing from .env" + "\033[0m\033[0m"
# Table config
RESULTS_STORE_NAME = os.getenv("RESULTS_STORE_NAME", os.getenv("TABLE_NAME", ""))
assert RESULTS_STORE_NAME, "\033[91m\033[1m" + "RESULTS_STORE_NAME environment variable is missing from .env" + "\033[0m\033[0m"
# Run configuration
INSTANCE_NAME = os.getenv("INSTANCE_NAME", os.getenv("BABY_NAME", "BabyAGI"))
COOPERATIVE_MODE = "none"
JOIN_EXISTING_OBJECTIVE = False
# Goal configuration
OBJECTIVE = os.getenv("OBJECTIVE", "")
INITIAL_TASK = os.getenv("INITIAL_TASK", os.getenv("FIRST_TASK", ""))
# Model configuration
OPENAI_TEMPERATURE = float(os.getenv("OPENAI_TEMPERATURE", 0.0))
# Extensions support begin
def can_import(module_name):
try:
importlib.import_module(module_name)
return True
except ImportError:
return False
DOTENV_EXTENSIONS = os.getenv("DOTENV_EXTENSIONS", "").split(" ")
# Command line arguments extension
# Can override any of the above environment variables
ENABLE_COMMAND_LINE_ARGS = (
os.getenv("ENABLE_COMMAND_LINE_ARGS", "false").lower() == "true"
)
if ENABLE_COMMAND_LINE_ARGS:
if can_import("extensions.argparseext"):
from extensions.argparseext import parse_arguments
OBJECTIVE, INITIAL_TASK, LLM_MODEL, DOTENV_EXTENSIONS, INSTANCE_NAME, COOPERATIVE_MODE, JOIN_EXISTING_OBJECTIVE = parse_arguments()
# Human mode extension
# Gives human input to babyagi
if LLM_MODEL.startswith("human"):
if can_import("extensions.human_mode"):
from extensions.human_mode import user_input_await
# Load additional environment variables for enabled extensions
# TODO: This might override the following command line arguments as well:
# OBJECTIVE, INITIAL_TASK, LLM_MODEL, INSTANCE_NAME, COOPERATIVE_MODE, JOIN_EXISTING_OBJECTIVE
if DOTENV_EXTENSIONS:
if can_import("extensions.dotenvext"):
from extensions.dotenvext import load_dotenv_extensions
load_dotenv_extensions(DOTENV_EXTENSIONS)
# TODO: There's still work to be done here to enable people to get
# defaults from dotenv extensions, but also provide command line
# arguments to override them
# Extensions support end
print("\033[95m\033[1m" + "\n*****CONFIGURATION*****\n" + "\033[0m\033[0m")
print(f"Name : {INSTANCE_NAME}")
print(f"Mode : {'alone' if COOPERATIVE_MODE in ['n', 'none'] else 'local' if COOPERATIVE_MODE in ['l', 'local'] else 'distributed' if COOPERATIVE_MODE in ['d', 'distributed'] else 'undefined'}")
print(f"LLM : {LLM_MODEL}")
# Check if we know what we are doing
assert OBJECTIVE, "\033[91m\033[1m" + "OBJECTIVE environment variable is missing from .env" + "\033[0m\033[0m"
assert INITIAL_TASK, "\033[91m\033[1m" + "INITIAL_TASK environment variable is missing from .env" + "\033[0m\033[0m"
LLAMA_MODEL_PATH = os.getenv("LLAMA_MODEL_PATH", "models/llama-13B/ggml-model.bin")
if LLM_MODEL.startswith("llama"):
if can_import("llama_cpp"):
from llama_cpp import Llama
print(f"LLAMA : {LLAMA_MODEL_PATH}" + "\n")
assert os.path.exists(LLAMA_MODEL_PATH), "\033[91m\033[1m" + f"Model can't be found." + "\033[0m\033[0m"
CTX_MAX = 2048
LLAMA_THREADS_NUM = int(os.getenv("LLAMA_THREADS_NUM", 8))
llm = Llama(
model_path=LLAMA_MODEL_PATH,
n_ctx=CTX_MAX,
n_threads=LLAMA_THREADS_NUM,
n_batch=512,
use_mlock=True,
)
llm_embed = Llama(
model_path=LLAMA_MODEL_PATH,
n_ctx=CTX_MAX,
n_threads=LLAMA_THREADS_NUM,
n_batch=512,
embedding=True,
use_mlock=True,
)
print(
"\033[91m\033[1m"
+ "\n*****USING LLAMA.CPP. POTENTIALLY SLOW.*****"
+ "\033[0m\033[0m"
)
else:
print(
"\033[91m\033[1m"
+ "\nLlama LLM requires package llama-cpp. Falling back to GPT-3.5-turbo."
+ "\033[0m\033[0m"
)
LLM_MODEL = "gpt-3.5-turbo"
if LLM_MODEL.startswith("gpt-4"):
print(
"\033[91m\033[1m"
+ "\n*****USING GPT-4. POTENTIALLY EXPENSIVE. MONITOR YOUR COSTS*****"
+ "\033[0m\033[0m"
)
if LLM_MODEL.startswith("human"):
print(
"\033[91m\033[1m"
+ "\n*****USING HUMAN INPUT*****"
+ "\033[0m\033[0m"
)
print("\033[94m\033[1m" + "\n*****OBJECTIVE*****\n" + "\033[0m\033[0m")
print(f"{OBJECTIVE}")
if not JOIN_EXISTING_OBJECTIVE:
print("\033[93m\033[1m" + "\nInitial task:" + "\033[0m\033[0m" + f" {INITIAL_TASK}")
else:
print("\033[93m\033[1m" + f"\nJoining to help the objective" + "\033[0m\033[0m")
# Configure OpenAI
openai.api_key = OPENAI_API_KEY
# Llama embedding function
class LlamaEmbeddingFunction(EmbeddingFunction):
def __init__(self):
return
def __call__(self, texts: Documents) -> Embeddings:
embeddings = []
for t in texts:
e = llm_embed.embed(t)
embeddings.append(e)
return embeddings
# Results storage using local ChromaDB
class DefaultResultsStorage:
def __init__(self):
logging.getLogger('chromadb').setLevel(logging.ERROR)
# Create Chroma collection
chroma_persist_dir = "chroma"
chroma_client = chromadb.Client(
settings=chromadb.config.Settings(
chroma_db_impl="duckdb+parquet",
persist_directory=chroma_persist_dir,
)
)
metric = "cosine"
if LLM_MODEL.startswith("llama"):
embedding_function = LlamaEmbeddingFunction()
else:
embedding_function = OpenAIEmbeddingFunction(api_key=OPENAI_API_KEY)
self.collection = chroma_client.get_or_create_collection(
name=RESULTS_STORE_NAME,
metadata={"hnsw:space": metric},
embedding_function=embedding_function,
)
def add(self, task: Dict, result: str, result_id: str):
# Break the function if LLM_MODEL starts with "human" (case-insensitive)
if LLM_MODEL.startswith("human"):
return
# Continue with the rest of the function
embeddings = llm_embed.embed(result) if LLM_MODEL.startswith("llama") else None
if (
len(self.collection.get(ids=[result_id], include=[])["ids"]) > 0
): # Check if the result already exists
self.collection.update(
ids=result_id,
embeddings=embeddings,
documents=result,
metadatas={"task": task["task_name"], "result": result},
)
else:
self.collection.add(
ids=result_id,
embeddings=embeddings,
documents=result,
metadatas={"task": task["task_name"], "result": result},
)
def query(self, query: str, top_results_num: int) -> List[dict]:
count: int = self.collection.count()
if count == 0:
return []
results = self.collection.query(
query_texts=query,
n_results=min(top_results_num, count),
include=["metadatas"]
)
return [item["task"] for item in results["metadatas"][0]]
# Initialize results storage
results_storage = DefaultResultsStorage()
PINECONE_API_KEY = os.getenv("PINECONE_API_KEY", "")
if PINECONE_API_KEY:
if can_import("extensions.pinecone_storage"):
PINECONE_ENVIRONMENT = os.getenv("PINECONE_ENVIRONMENT", "")
assert (
PINECONE_ENVIRONMENT
), "\033[91m\033[1m" + "PINECONE_ENVIRONMENT environment variable is missing from .env" + "\033[0m\033[0m"
from extensions.pinecone_storage import PineconeResultsStorage
results_storage = PineconeResultsStorage(OPENAI_API_KEY, PINECONE_API_KEY, PINECONE_ENVIRONMENT, LLM_MODEL,
LLAMA_MODEL_PATH, RESULTS_STORE_NAME, OBJECTIVE)
print("\nReplacing results storage: " + "\033[93m\033[1m" + "Pinecone" + "\033[0m\033[0m")
# Task storage supporting only a single instance of BabyAGI
class SingleTaskListStorage:
def __init__(self):
self.tasks = deque([])
self.task_id_counter = 0
def append(self, task: Dict):
self.tasks.append(task)
def replace(self, tasks: List[Dict]):
self.tasks = deque(tasks)
def popleft(self):
return self.tasks.popleft()
def is_empty(self):
return False if self.tasks else True
def next_task_id(self):
self.task_id_counter += 1
return self.task_id_counter
def get_task_names(self):
return [t["task_name"] for t in self.tasks]
# Initialize tasks storage
tasks_storage = SingleTaskListStorage()
if COOPERATIVE_MODE in ['l', 'local']:
if can_import("extensions.ray_tasks"):
import sys
from pathlib import Path
sys.path.append(str(Path(__file__).resolve().parent))
from extensions.ray_tasks import CooperativeTaskListStorage
tasks_storage = CooperativeTaskListStorage(OBJECTIVE)
print("\nReplacing tasks storage: " + "\033[93m\033[1m" + "Ray" + "\033[0m\033[0m")
elif COOPERATIVE_MODE in ['d', 'distributed']:
pass
def limit_tokens_from_string(string: str, model: str, limit: int) -> str:
"""Limits the string to a number of tokens (estimated)."""
try:
encoding = tiktoken.encoding_for_model(model)
    except KeyError:
        encoding = tiktoken.encoding_for_model('gpt2')  # Fallback for model names tiktoken does not know.
encoded = encoding.encode(string)
return encoding.decode(encoded[:limit])
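# Hedged illustration of the helper above (exact token boundaries depend on the tokenizer):
#   limit_tokens_from_string("one two three four five", "gpt-3.5-turbo", 3)
# would return roughly the first three tokens, e.g. "one two three".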
def openai_call(
prompt: str,
model: str = LLM_MODEL,
temperature: float = OPENAI_TEMPERATURE,
max_tokens: int = 100,
):
while True:
try:
if model.lower().startswith("llama"):
result = llm(prompt[:CTX_MAX], stop=["### Human"], echo=False, temperature=0.2)
return str(result['choices'][0]['text'].strip())
elif model.lower().startswith("human"):
return user_input_await(prompt)
elif not model.lower().startswith("gpt-"):
# Use completion API
response = openai.Completion.create(
engine=model,
prompt=prompt,
temperature=temperature,
max_tokens=max_tokens,
top_p=1,
frequency_penalty=0,
presence_penalty=0,
)
return response.choices[0].text.strip()
else:
# Use 4000 instead of the real limit (4097) to give a bit of wiggle room for the encoding of roles.
# TODO: different limits for different models.
trimmed_prompt = limit_tokens_from_string(prompt, model, 4000 - max_tokens)
# Use chat completion API
messages = [{"role": "system", "content": trimmed_prompt}]
response = openai.ChatCompletion.create(
model=model,
messages=messages,
temperature=temperature,
max_tokens=max_tokens,
n=1,
stop=None,
)
return response.choices[0].message.content.strip()
except openai.error.RateLimitError:
print(
" *** The OpenAI API rate limit has been exceeded. Waiting 10 seconds and trying again. ***"
)
time.sleep(10) # Wait 10 seconds and try again
except openai.error.Timeout:
print(
" *** OpenAI API timeout occurred. Waiting 10 seconds and trying again. ***"
)
time.sleep(10) # Wait 10 seconds and try again
except openai.error.APIError:
print(
" *** OpenAI API error occurred. Waiting 10 seconds and trying again. ***"
)
time.sleep(10) # Wait 10 seconds and try again
except openai.error.APIConnectionError:
print(
" *** OpenAI API connection error occurred. Check your network settings, proxy configuration, SSL certificates, or firewall rules. Waiting 10 seconds and trying again. ***"
)
time.sleep(10) # Wait 10 seconds and try again
except openai.error.InvalidRequestError:
print(
" *** OpenAI API invalid request. Check the documentation for the specific API method you are calling and make sure you are sending valid and complete parameters. Waiting 10 seconds and trying again. ***"
)
time.sleep(10) # Wait 10 seconds and try again
except openai.error.ServiceUnavailableError:
print(
" *** OpenAI API service unavailable. Waiting 10 seconds and trying again. ***"
)
time.sleep(10) # Wait 10 seconds and try again
else:
break
def task_creation_agent(
objective: str, result: Dict, task_description: str, task_list: List[str]
):
prompt = f"""
You are to use the result from an execution agent to create new tasks with the following objective: {objective}.
The last completed task has the result: \n{result["data"]}
This result was based on this task description: {task_description}.\n"""
if task_list:
prompt += f"These are incomplete tasks: {', '.join(task_list)}\n"
prompt += "Based on the result, create a list of new tasks to be completed in order to meet the objective. "
if task_list:
prompt += "These new tasks must not overlap with incomplete tasks. "
prompt += """
Return all the new tasks, with one task per line in your response. The result must be a numbered list in the format:
#. First task
#. Second task
The number of each entry must be followed by a period.
Do not include any headers before your numbered list. Do not follow your numbered list with any other output."""
print(f'\n************** TASK CREATION AGENT PROMPT *************\n{prompt}\n')
response = openai_call(prompt, max_tokens=2000)
print(f'\n************* TASK CREATION AGENT RESPONSE ************\n{response}\n')
new_tasks = response.split('\n')
new_tasks_list = []
for task_string in new_tasks:
task_parts = task_string.strip().split(".", 1)
if len(task_parts) == 2:
task_id = ''.join(s for s in task_parts[0] if s.isnumeric())
task_name = re.sub(r'[^\w\s_]+', '', task_parts[1]).strip()
if task_name.strip() and task_id.isnumeric():
new_tasks_list.append(task_name)
# print('New task created: ' + task_name)
out = [{"task_name": task_name} for task_name in new_tasks_list]
return out
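# Hedged parsing illustration (hypothetical response text, not produced by this code): a reply such as
#   "1. Research the topic\n2. Draft an outline"
# would be parsed into [{"task_name": "Research the topic"}, {"task_name": "Draft an outline"}].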
def prioritization_agent():
task_names = tasks_storage.get_task_names()
next_task_id = tasks_storage.next_task_id()
prompt = f"""
You are tasked with cleaning the format and re-prioritizing the following tasks: {', '.join(task_names)}.
Consider the ultimate objective of your team: {OBJECTIVE}.
Tasks should be sorted from highest to lowest priority.
Higher-priority tasks are those that act as pre-requisites or are more essential for meeting the objective.
Do not remove any tasks. Return the result as a numbered list in the format:
#. First task
#. Second task
The entries are consecutively numbered, starting with 1. The number of each entry must be followed by a period.
Do not include any headers before your numbered list. Do not follow your numbered list with any other output."""
print(f'\n************** TASK PRIORITIZATION AGENT PROMPT *************\n{prompt}\n')
response = openai_call(prompt, max_tokens=2000)
print(f'\n************* TASK PRIORITIZATION AGENT RESPONSE ************\n{response}\n')
new_tasks = response.split("\n") if "\n" in response else [response]
new_tasks_list = []
for task_string in new_tasks:
task_parts = task_string.strip().split(".", 1)
if len(task_parts) == 2:
task_id = ''.join(s for s in task_parts[0] if s.isnumeric())
task_name = re.sub(r'[^\w\s_]+', '', task_parts[1]).strip()
if task_name.strip():
new_tasks_list.append({"task_id": task_id, "task_name": task_name})
tasks_storage.replace(new_tasks_list)
# Execute a task based on the objective and five previous tasks
def execution_agent(objective: str, task: str) -> str:
"""
Executes a task based on the given objective and previous context.
Args:
objective (str): The objective or goal for the AI to perform the task.
task (str): The task to be executed by the AI.
Returns:
str: The response generated by the AI for the given task.
"""
context = context_agent(query=objective, top_results_num=5)
# print("\n*******RELEVANT CONTEXT******\n")
# print(context)
# print('')
prompt = f'Perform one task based on the following objective: {objective}.\n'
if context:
        prompt += 'Take into account these previously completed tasks:' + '\n'.join(context)
    prompt += f'\nYour task: {task}\nResponse:'
return openai_call(prompt, max_tokens=2000)
# Get the top n completed tasks for the objective
def context_agent(query: str, top_results_num: int):
"""
Retrieves context for a given query from an index of tasks.
Args:
query (str): The query or objective for retrieving context.
top_results_num (int): The number of top results to retrieve.
Returns:
list: A list of tasks as context for the given query, sorted by relevance.
"""
results = results_storage.query(query=query, top_results_num=top_results_num)
# print("***** RESULTS *****")
# print(results)
return results
# Add the initial task if starting new objective
if not JOIN_EXISTING_OBJECTIVE:
initial_task = {
"task_id": tasks_storage.next_task_id(),
"task_name": INITIAL_TASK
}
tasks_storage.append(initial_task)
def main():
loop = True
while loop:
# As long as there are tasks in the storage...
if not tasks_storage.is_empty():
# Print the task list
print("\033[95m\033[1m" + "\n*****TASK LIST*****\n" + "\033[0m\033[0m")
for t in tasks_storage.get_task_names():
print(" • " + str(t))
# Step 1: Pull the first incomplete task
task = tasks_storage.popleft()
print("\033[92m\033[1m" + "\n*****NEXT TASK*****\n" + "\033[0m\033[0m")
print(str(task["task_name"]))
# Send to execution function to complete the task based on the context
result = execution_agent(OBJECTIVE, str(task["task_name"]))
print("\033[93m\033[1m" + "\n*****TASK RESULT*****\n" + "\033[0m\033[0m")
print(result)
# Step 2: Enrich result and store in the results storage
# This is where you should enrich the result if needed
enriched_result = {
"data": result
}
# extract the actual result from the dictionary
# since we don't do enrichment currently
# vector = enriched_result["data"]
result_id = f"result_{task['task_id']}"
results_storage.add(task, result, result_id)
# Step 3: Create new tasks and re-prioritize task list
# only the main instance in cooperative mode does that
new_tasks = task_creation_agent(
OBJECTIVE,
enriched_result,
task["task_name"],
tasks_storage.get_task_names(),
)
print('Adding new tasks to task_storage')
for new_task in new_tasks:
new_task.update({"task_id": tasks_storage.next_task_id()})
print(str(new_task))
tasks_storage.append(new_task)
if not JOIN_EXISTING_OBJECTIVE: prioritization_agent()
# Sleep a bit before checking the task list again
time.sleep(5)
else:
print('Done.')
loop = False
if __name__ == "__main__":
main()
| [
"\n",
"Take into account these previously completed tasks:",
"\nYour task: PLACEHOLDER\nResponse:",
"Perform one task based on the following objective: PLACEHOLDER.\n",
"Based on the result, create a list of new tasks to be completed in order to meet the objective. ",
"\nYou are to use the result from an execution agent to create new tasks with the following objective: PLACEHOLDER.\nThe last completed task has the result: \nPLACEHOLDER\nThis result was based on this task description: PLACEHOLDER.\n",
", ",
"\nReturn all the new tasks, with one task per line in your response. The result must be a numbered list in the format:\n \n#. First task\n#. Second task\n \nThe number of each entry must be followed by a period.\nDo not include any headers before your numbered list. Do not follow your numbered list with any other output.",
"These new tasks must not overlap with incomplete tasks. "
] |
2024-01-10 | 04diiguyi/LangchainExample | src~app_param_multi_agent.py | # To run: In the current folder:
# python app_param_multi_agent.py
# This example is a sample that creates an agent which helps
# check tire inventory and calculate the sale price of tires.
# This agent has three tools: one math tool, one that checks inventory,
# and one that fetches the tire price. In this demo, the price/inventory tools give
# hard-coded responses; however, we can use an API tool,
# a SQL tool, or even a chain tool to
# customize it for real business logic.
#
# Please note the price tool takes one string as input, while the inventory tool
# takes two parameters. That's why we need an agent whose
# type is STRUCTURED_CHAT_ZERO_SHOT_REACT_DESCRIPTION (see the hedged sketch below).
#
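# For illustration only, a hedged sketch (not necessarily the real implementation
# in tools/tool_inventory.py) of what a two-parameter structured tool can look like:
#
#   from langchain.tools import StructuredTool
#
#   def inventory_api(tire: str, store: str) -> str:
#       """Searches the inventory information for `tire` in `store`."""
#       return f"There are 10 {tire} available in store {store}."
#
#   inventory_tool = StructuredTool.from_function(inventory_api)
#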
# The backend LLMChain uses the OpenAI gpt3.5 chat model, which is not a correct approach.
# Example response. The answer varies every time, with or without prompt optimization.
# > Entering new AgentExecutor chain...
# Thought: I can use the `inventory_api` tool to check if the Issaquah store has
# enough Goodyear tires in stock. Then, I can use the `Search Price` tool to get the total price of four Goodyear tires.
#
#Action:
#```
#{
# "action": "inventory_api",
# "action_input": {
# "tire": "Goodyear",
# "store": "Issaquah"
# }
#}
#```
#
#Observation: There are 10 Goodyear available in store Issaquah.
#Thought:Now that I know there are 10 Goodyear tires available in the Issaquah store,
#I can use the `Search Price` tool to get the total price of four Goodyear tires.
#
#Action:
#```
#{
# "action": "Search Price",
# "action_input": "What is the price of four Goodyear tires?"
#}
#```
#
#Observation: Tire What is the price of four Goodyear tires? is $150 each.
#Thought:Now that I know the price of each tire, I can calculate the total price of four Goodyear tires.
#
#Action:
#```
#{
# "action": "Final Answer",
# "action_input": "The total price of four Goodyear tires in the Issaquah store is $600."
#}
#```
#
#> Finished chain.
#The total price of four Goodyear tires in the Issaquah store is $600.
#
#> Entering new AgentExecutor chain...
#Thought: I can use the `inventory_api` tool to check if the Issaquah store
# has enough Good Year tires in stock. Then, I can use the `Search Price` tool to
# get the price of one Good Year tire and calculate the total price for 30 tires.
#
#Action:
#```
#{
# "action": "inventory_api",
# "action_input": {
# "tire": "Good Year",
# "store": "Issaquah"
# }
#}
#```
#
#Observation: There are 10 Good Year available in store Issaquah.
#Thought:Since there are only 10 Good Year tires available in the Issaquah store,
#the customer cannot buy 30 tires from that store. I need to inform the customer about this.
#
#Action:
#```
#{
# "action": "Final Answer",
# "action_input": "I'm sorry, but there are only 10 Good Year tires available in the Issaquah store.
# We cannot fulfill your request for 30 tires from that store."
#}
#```
#
#> Finished chain.
#I'm sorry, but there are only 10 Good Year tires available in the Issaquah store.
# We cannot fulfill your request for 30 tires from that store.
import os
from langchain.agents import initialize_agent, Tool, AgentType
from langchain.tools import StructuredTool
from langchain.chat_models import AzureChatOpenAI
from api_key import Az_OpenAI_api_key, Az_OpenAI_endpoint, Az_Open_Deployment_name_gpt35
from langchain import LLMMathChain
from tools.tool_price import price_api
from tools.tool_inventory import inventory_api
os.environ["OPENAI_API_TYPE"] = "azure"
os.environ["OPENAI_API_VERSION"] = "2023-03-15-preview"
os.environ["OPENAI_API_BASE"] = Az_OpenAI_endpoint
os.environ["OPENAI_API_KEY"] = Az_OpenAI_api_key
# Check the existence of tools
print(price_api)
print(inventory_api)
## Set up OpenAI as chat LLM
chat = AzureChatOpenAI(deployment_name=Az_Open_Deployment_name_gpt35,
openai_api_version="2023-03-15-preview", temperature=0)
llm_math_chain = LLMMathChain(llm=chat)
tools = [
Tool(
name = "Search Price",
func=price_api.run,
description="useful for when you need to answer the price of tires"
),
StructuredTool.from_function(inventory_api),
]
agent = initialize_agent(tools, chat, agent=AgentType.STRUCTURED_CHAT_ZERO_SHOT_REACT_DESCRIPTION, verbose=True)
#response = agent.run("I want to buy four good year tires in my local Issaquah store, \
# do we have enough in stock and how much is the total price?")
#print(response)
#response = agent.run("I want to buy 30 good year tires in my local Issaquah store, \
# do we have enough in stock and how much is the total price?")
#print(response)
# Hallucination error: we do not provide store info, so OpenAI hallucinates my_store to fill in the parameter `store`
#Action:
#```
#{
# "action": "inventory_api",
# "action_input": {
# "tire": "goodyear",
# "store": "my_store"
# }
#}
#```
#Observation: There are 10 goodyear available in store my_store.
#Thought:Action:
#```
#{
# "action": "Search Price",
# "action_input": "goodyear tires"
#}
#```
# ...
response = agent.run("I want to buy four good year tires, \
do we have enough in stock and how much is the total price?")
print(response) | [] |
2024-01-10 | 04diiguyi/LangchainExample | src~IncorrectSamples~az_openAI_chain_incorrect_sample.py | # To run: In the current folder:
# python az_openAI_chain_incorrect_sample.py
# This example is an incorrect sample that directly uses gpt3.5 as a completion LLM instead
# of as a chat model. Since it should be used as a chat model, the chatbot cannot stop
# chatting with itself; one response as an example:
# Why did the tomato turn red? Because it saw the salad dressing. (laughing)
# I'm glad you're enjoying yourself.
# I'm having a great time. (laughing)
# Oh, I'm sorry. (laughing)
# We're gonna have to go. (laughing)
# I'm sorry. (laughing)
# I'm sorry. (laughing)
# I'm sorry. (laughing)
# I'm sorry. (laughing)
# I'm sorry. (laughing)
# I'm sorry. (laughing)
# I'm sorry.
# I'm sorry. (laughing)
# I'm sorry.
# I'm sorry. (laughing)
# I'm sorry.
# ...
import sys
sys.path.append('../')
import os
from langchain import PromptTemplate, LLMChain
from langchain.llms import AzureOpenAI
from custom_llm import CustomLLM
from api_key import Az_OpenAI_api_key, Az_OpenAI_endpoint, Az_Open_Deployment_name_gpt35
os.environ["OPENAI_API_TYPE"] = "azure"
os.environ["OPENAI_API_VERSION"] = "2022-12-01" #"2023-05-15"
os.environ["OPENAI_API_BASE"] = Az_OpenAI_endpoint
os.environ["OPENAI_API_KEY"] = Az_OpenAI_api_key
llm = CustomLLM()
result = llm("Tell me a joke")
print(result)
template = """Question: {question}
Answer: Let's think step by step."""
prompt = PromptTemplate(template=template, input_variables=["question"])
llm_chain = LLMChain(prompt=prompt, llm=llm)
question = "What NFL team won the Super Bowl in the year Justin Beiber was born?"
result = llm_chain.run(question)
print(result)
myllm = AzureOpenAI(
deployment_name=Az_Open_Deployment_name_gpt35,
model_name="gpt-35-turbo",
)
result = myllm("Tell me a joke")
print(result)
| [
"question",
"Question: {question}\n\nAnswer: Let's think step by step."
] |
2024-01-10 | 04diiguyi/LangchainExample | src~az_openAI_llmchain_sample.py | # To run: In the current folder:
# python az_openAI_llmchain_sample.py
# This example is a sample that uses OpenAI gpt3 as an LLM, and then creates
# an LLMChain using this LLM.
# Example response
# Justin Beiber was born in 1994,
# so the NFL team that won the Super Bowl that year was the Dallas Cowboys.
import os
from langchain.llms import AzureOpenAI
from langchain import PromptTemplate, LLMChain
from api_key import Az_OpenAI_api_key, Az_OpenAI_endpoint, Az_Open_Deployment_name_gpt3
os.environ["OPENAI_API_TYPE"] = "azure"
os.environ["OPENAI_API_VERSION"] = "2022-12-01"
os.environ["OPENAI_API_BASE"] = Az_OpenAI_endpoint
os.environ["OPENAI_API_KEY"] = Az_OpenAI_api_key
llm = AzureOpenAI(
deployment_name=Az_Open_Deployment_name_gpt3,
model_name="text-davinci-003",
)
template = """Question: {question}
Answer: Let's think step by step."""
prompt = PromptTemplate(template=template, input_variables=["question"])
llm_chain = LLMChain(prompt=prompt, llm=llm)
question = "What NFL team won the Super Bowl in the year Justin Beiber was born?"
result = llm_chain.run(question)
print(result)
| [
"question",
"Question: {question}\n\nAnswer: Let's think step by step."
] |
2024-01-10 | 04diiguyi/LangchainExample | src~app_param_single_agent.py | # To run: In the current folder:
# python app_param_single_agent.py
# This example is a sample that creates an agent which helps calculate the
# sale price of tires. This agent has two tools: one math tool and
# one that fetches the tire price. In this demo, the price tool gives a hard-coded response;
# however, we can use an API tool, a SQL tool, or even a chain tool to
# customize it for real business logic.
#
# Please note this agent only supports tools with one string input; if you need
# tools with multiple parameters, please refer to app_param_multi_agent.py.
#
# The backend LLMChain uses the OpenAI gpt3.5 chat model, which is not a correct approach.
# Example response
# > Entering new AgentExecutor chain...
# I need to find the price of Good Year tires
# Action: Search Price
# Action Input: "Good Year tires price"
# Observation: Tire Good Year tires price is $150 each.
# Thought:I need to calculate the total cost for four tires
# Action: Calculator
# Action Input: 150 x 4
# Observation: Answer: 600
# > Finished chain.
# answer: 600
import os
from langchain import LLMMathChain
from langchain.agents import initialize_agent, Tool
from langchain.agents import AgentType
from langchain.chat_models import AzureChatOpenAI
from api_key import Az_OpenAI_api_key, Az_OpenAI_endpoint, Az_Open_Deployment_name_gpt35
from tools.tool_price import price_api
os.environ["OPENAI_API_TYPE"] = "azure"
os.environ["OPENAI_API_VERSION"] = "2023-03-15-preview"
os.environ["OPENAI_API_BASE"] = Az_OpenAI_endpoint
os.environ["OPENAI_API_KEY"] = Az_OpenAI_api_key
# Check the existence of tools
print(price_api)
## Set up OpenAI as chat LLM
chat = AzureChatOpenAI(deployment_name=Az_Open_Deployment_name_gpt35,
openai_api_version="2023-03-15-preview", temperature=0)
llm_math_chain = LLMMathChain(llm=chat)
tools = [
Tool(
name = "Search Price",
func=price_api.run,
description="useful for when you need to answer the price of tires"
),
Tool(
name="Calculator",
func=llm_math_chain.run,
description="useful for when you need to answer questions about math",
return_direct=True
)
]
agent = initialize_agent(tools, chat, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True)
response = agent.run("How much are four good year tires?")
print(response)
| [] |
2024-01-10 | 04diiguyi/LangchainExample | src~IncorrectSamples~custom_llm.py | from typing import List, Optional
import openai
from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.llms.base import LLM
from api_key import Az_OpenAI_api_key, Az_OpenAI_endpoint, Az_Open_Deployment_name_gpt35
class CustomLLM(LLM):
openai.api_key = Az_OpenAI_api_key
openai.api_base = Az_OpenAI_endpoint
openai.api_type = 'azure'
openai.api_version = '2023-05-15'
@property
def _llm_type(self) -> str:
return "custom"
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
) -> str:
deployment_name=Az_Open_Deployment_name_gpt35
# Send a completion call to generate an answer
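        # Note: this sends a Completions-API request to a chat (gpt-35-turbo) deployment,
        # which is why this wrapper lives under IncorrectSamples.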
response = openai.Completion.create(engine=deployment_name, prompt=prompt, max_tokens=256, temperature=0.4,n=1)
text = response['choices'][0]['text']
return text | [] |
2024-01-10 | 04diiguyi/LangchainExample | src~az_openAI_chat_plain_json.py | import os
import json
from langchain.chat_models import AzureChatOpenAI
from langchain import LLMChain
from langchain.prompts.chat import (
ChatPromptTemplate,
SystemMessagePromptTemplate,
HumanMessagePromptTemplate
)
from api_key import Az_OpenAI_api_key, Az_OpenAI_endpoint, Az_Open_Deployment_name_gpt35
class Car_inventory_Info:
"""class contains car information"""
def __init__(self, id, name, model, color, drive, inventory, extra_info):
self.id = id
self.name = name
self.model = model
self.color = color
self.drive = drive
self.inventory = inventory
self.extra_info = extra_info
os.environ["OPENAI_API_TYPE"] = "azure"
os.environ["OPENAI_API_VERSION"] = "2023-03-15-preview"
os.environ["OPENAI_API_BASE"] = Az_OpenAI_endpoint
os.environ["OPENAI_API_KEY"] = Az_OpenAI_api_key
chat = AzureChatOpenAI(deployment_name=Az_Open_Deployment_name_gpt35,
openai_api_version="2023-03-15-preview", temperature=0)
opening_sentense = "You are a json translator that translates a json string to a human readable sentence. "
detail_message1 = "If field `drive` is `A`, it means all wheel drive, if field `drive` is `F`, it means front wheel drive, if field `drive` is `R`, it means rear wheel drive."
# Please note: if you do not say the inventory integer is in months, or if you remove this detail from the prompt, OpenAI will treat the integer value of inventory as the number of vehicles in stock
detail_message2 = "If field `inventory` is `Stock` it means the vehicle is available in stock, if field `inventory` is `Transit`, it means the vehicle is in transit, if field `inventory` is an integer, it means the buyer needs to wait for that amount of months to get the vehicle, if the value is larger than 24, it means we no longer accept orders to the vehicle."
detail_message3 = "If field `extra_info` is `rim` it means we need rim size from the customer, if field `extra_info` is `None`, it means we do not need extra information, if field `extra_info` is `body`, it means we need car body information."
detail_message4 = "If field `name` is `Ford`, `model` is `Mustang`, `color` is `classic`, please use `bumblebee` instead of name, model, color to call this car."
template=opening_sentense + detail_message1 + detail_message2 + detail_message3 + detail_message4
system_message_prompt = SystemMessagePromptTemplate.from_template(template)
human_template="{text}"
human_message_prompt = HumanMessagePromptTemplate.from_template(human_template)
chat_prompt = ChatPromptTemplate.from_messages([system_message_prompt, human_message_prompt])
chain = LLMChain(llm=chat, prompt=chat_prompt)
# get a chat completion from the formatted messages
# Car with ID 001 is a White Ford Explorer with front wheel drive and is currently in stock. No extra information is needed.
#car_inventory_Info = Car_inventory_Info('001', 'Ford', 'Explorer', 'White', 'F', 'Stock', 'None')
#json_string = json.dumps(car_inventory_Info, default=vars)
#print(json_string)
#response = chain.run(json_string)
# Vehicle with ID 002 is a red BMW X7 with all wheel drive. It is currently not in stock and the buyer needs to wait for 6 months to get the vehicle. We need rim size information from the customer.
#car_inventory_Info = Car_inventory_Info('002', 'BMW', 'X7', 'Red', 'A', '6', 'rim')
#json_string = json.dumps(car_inventory_Info, default=vars)
#print(json_string)
#response = chain.run(json_string)
# Vehicle with ID 002 is a red BMW X3 with all wheel drive. This vehicle is currently not available for purchase as it will take 26 months to arrive. We require the rim size from the customer before we can proceed with the purchase.
#car_inventory_Info = Car_inventory_Info('002', 'BMW', 'X3', 'Red', 'A', '26', 'rim')
#json_string = json.dumps(car_inventory_Info, default=vars)
#print(json_string)
#response = chain.run(json_string)
# Car with ID 003 is a black Toyota Rav 4 with rear wheel drive. It is currently in transit and we need information about the car body.
#car_inventory_Info = Car_inventory_Info('003', 'Toyota', 'Rav 4', 'Black', 'R', 'Transit', 'body')
#json_string = json.dumps(car_inventory_Info, default=vars)
#print(json_string)
#response = chain.run(json_string)
# Vehicle with ID 004 is a bumblebee with all wheel drive, available in 3 months. No extra information is needed.
car_inventory_Info = Car_inventory_Info('004', 'Ford', 'Mustang', 'classic', 'A', '3', 'None')
json_string = json.dumps(car_inventory_Info, default=vars)
print(json_string)
response = chain.run(json_string)
print(response)
| [
"You are a json translator that translates a json string to a human readable sentence. If field `drive` is `A`, it means all wheel drive, if field `drive` is `F`, it means front wheel drive, if field `drive` is `R`, it means rear wheel drive.If field `inventory` is `Stock` it means the vehicle is available in stock, if field `inventory` is `Transit`, it means the vehicle is in transit, if field `inventory` is an integer, it means the buyer needs to wait for that amount of months to get the vehicle, if the value is larger than 24, it means we no longer accept orders to the vehicle.If field `extra_info` is `rim` it means we need rim size from the customer, if field `extra_info` is `None`, it means we do not need extra information, if field `extra_info` is `body`, it means we need car body information.If field `name` is `Ford`, `model` is `Mustang`, `color` is `classic`, please use `bumblebee` instead of name, model, color to call this car.",
"[PLACEHOLDER, PLACEHOLDER]",
"opening_sentense2d404f29-12f3-4036-a61a-457caaf397cfdetail_message12d404f29-12f3-4036-a61a-457caaf397cfdetail_message22d404f29-12f3-4036-a61a-457caaf397cfdetail_message32d404f29-12f3-4036-a61a-457caaf397cfIf field `name` is `Ford`, `model` is `Mustang`, `color` is `classic`, please use `bumblebee` instead of name, model, color to call this car.",
"{text}"
] |
2024-01-10 | 04diiguyi/LangchainExample | src~az_openAI_chat_as_llm.py | # To run: In the current folder:
# python az_openAI_chat_as_llm.py
# This example is a sample that uses OpenAI gpt3.5 as a llmchain
# Please note the current AI message example does not function as expected.
# We need more prompt engineering to get it working. However, this sample code
# is just for demo purposes to show the functionality.
# Example response
# Chicago is in the state of Illinois.
import os
from langchain.chat_models import AzureChatOpenAI
from langchain import LLMChain
from langchain.prompts.chat import (
ChatPromptTemplate,
SystemMessagePromptTemplate,
AIMessagePromptTemplate,
HumanMessagePromptTemplate,
)
from api_key import Az_OpenAI_api_key, Az_OpenAI_endpoint, Az_Open_Deployment_name_gpt35
os.environ["OPENAI_API_TYPE"] = "azure"
os.environ["OPENAI_API_VERSION"] = "2023-03-15-preview"
os.environ["OPENAI_API_BASE"] = Az_OpenAI_endpoint
os.environ["OPENAI_API_KEY"] = Az_OpenAI_api_key
chat = AzureChatOpenAI(deployment_name=Az_Open_Deployment_name_gpt35,
openai_api_version="2023-03-15-preview", temperature=0)
template="You are a helpful assistant that helps user to find information."
system_message_prompt = SystemMessagePromptTemplate.from_template(template)
example_human = HumanMessagePromptTemplate.from_template("Hi, which state is Seattle in?")
example_ai = AIMessagePromptTemplate.from_template("Washington")
human_template="{text}"
human_message_prompt = HumanMessagePromptTemplate.from_template(human_template)
chat_prompt = ChatPromptTemplate.from_messages([system_message_prompt, example_human, example_ai, human_message_prompt])
chain = LLMChain(llm=chat, prompt=chat_prompt)
# get a chat completion from the formatted messages
response = chain.run("Which state is Chicago in?")
print(response) | [
"You are a helpful assistant that helps user to find information.",
"Washington",
"[PLACEHOLDER, PLACEHOLDER, PLACEHOLDER, PLACEHOLDER]",
"Hi, which state is Seattle in?",
"{text}"
] |
2024-01-10 | 04diiguyi/LangchainExample | src~app_param_multi_agent_with_validation.py | # To run: In the current folder:
# python app_param_multi_agent_with_validation.py
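# This sample pairs the built-in "human" tool with inventory_api_v2: when the
# user's request is missing the `tire` or `store` parameter, the agent asks the
# human for it before checking the inventory (see the example traces below).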
import os
from langchain.agents import initialize_agent, Tool, AgentType, load_tools
from langchain.tools import StructuredTool, tool
from langchain.llms import AzureOpenAI
from langchain.chat_models import AzureChatOpenAI
from api_key import Az_OpenAI_api_key, Az_OpenAI_endpoint, Az_Open_Deployment_name_gpt35, Az_Open_Deployment_name_gpt3
from tools.tool_inventory_with_validation import inventory_api_v2
os.environ["OPENAI_API_TYPE"] = "azure"
os.environ["OPENAI_API_VERSION"] = "2023-03-15-preview"
os.environ["OPENAI_API_BASE"] = Az_OpenAI_endpoint
os.environ["OPENAI_API_KEY"] = Az_OpenAI_api_key
## Set up OpenAI as chat LLM
chat = AzureChatOpenAI(deployment_name=Az_Open_Deployment_name_gpt35,
openai_api_version="2023-03-15-preview", temperature=0)
tools = load_tools(
["human"]
)
tools.append(StructuredTool.from_function(inventory_api_v2))
agent = initialize_agent(tools, chat, agent=AgentType.STRUCTURED_CHAT_ZERO_SHOT_REACT_DESCRIPTION, verbose=True)
#> Entering new AgentExecutor chain...
#Action:
#```
#{
# "action": "inventory_api_v2",
# "action_input": "Do we have enough Goodyear tires in stock?"
#}
#```
#prompt: Extract `tire` and `store` information from Do we have enough Goodyear tires in stock? and create a json object. If the information is missing, leave it empty. Example: I want to buy Michelin tires from store Issaquah becomes {"store": "Issaquah", "tire": "Michelin"}. Example: I want to buy tires at my local store Bellevue becomes {"store": "Bellevue", "tire": ""}. Example: I want to buy four goodyear tires becomes {"store": "", "tire": "goodyear"}.
#inventory_api_extract_info:
#
#{"store": "", "tire": "Goodyear"}
#
#Observation: Please ask human to provide `store` information
#Thought:What is the `store` information?
#Action:
#```
#{
# "action": "Human",
# "action_input": "Can you please provide me with the `store` information?"
#}
#```
#
#Can you please provide me with the `store` information?
#Redmond
#
#Observation: Redmond
#Thought:Action:
#```
#{
# "action": "inventory_api_v2",
# "action_input": "Do we have enough Goodyear tires in stock at Redmond?"
#}
#```
#
#prompt: Extract `tire` and `store` information from Do we have enough Goodyear tires in stock at Redmond? and create a json object. If the information is missing, leave it empty. Example: I want to buy Michelin tires from store Issaquah becomes {"store": "Issaquah", "tire": "Michelin"}. Example: I want to buy tires at my local store Bellevue becomes {"store": "Bellevue", "tire": ""}. Example: I want to buy four goodyear tires becomes {"store": "", "tire": "goodyear"}.
#inventory_api_extract_info:
#
#{"store": "Redmond", "tire": "Goodyear"}
#
#Observation: There are 10 Goodyear available in store Redmond.
#Thought:Based on the inventory information, there are 10 Goodyear tires available in the Redmond store.
#
#> Finished chain.
#Based on the inventory information, there are 10 Goodyear tires available in the Redmond store.
###response = agent.run("I want to buy four goodyear tires and do we have enough in stock?")
##################################################################################################
#> Entering new AgentExecutor chain...
#Action:
#```
#{
# "action": "inventory_api_v2",
# "action_input": "How many Goodyear tires are available in the Issaquah store?"
#}
#```
#prompt: Extract `tire` and `store` information from How many Goodyear tires are available in the Issaquah store? and create a json object. If the information is missing, leave it empty. Example: I want to buy Michelin tires from store Issaquah becomes {"store": "Issaquah", "tire": "Michelin"}. Example: I want to buy tires at my local store Bellevue becomes {"store": "Bellevue", "tire": ""}. Example: I want to buy four goodyear tires becomes {"store": "", "tire": "goodyear"}.
#inventory_api_extract_info:
#
#{"store": "Issaquah", "tire": "Goodyear"}
#
#Observation: There are 10 Goodyear available in store Issaquah.
#Thought:The human wants to buy four Goodyear tires from the Issaquah store and wants to know if there are enough tires in stock. I can use the `inventory_api_v2` tool to check the inventory information for Goodyear tires in the Issaquah store.
#
#Action:
#```
#{
# "action": "inventory_api_v2",
# "action_input": "How many Goodyear tires are available in the Issaquah store?"
#}
#```
#
#prompt: Extract `tire` and `store` information from How many Goodyear tires are available in the Issaquah store? and create a json object. If the information is missing, leave it empty. Example: I want to buy Michelin tires from store Issaquah becomes {"store": "Issaquah", "tire": "Michelin"}. Example: I want to buy tires at my local store Bellevue becomes {"store": "Bellevue", "tire": ""}. Example: I want to buy four goodyear tires becomes {"store": "", "tire": "goodyear"}.
#inventory_api_extract_info:
#
#{"store": "Issaquah", "tire": "Goodyear"}
#
#Observation: There are 10 Goodyear available in store Issaquah.
#Thought:Based on the previous observation, there are 10 Goodyear tires available in the Issaquah store. The human wants to buy four tires, so there should be enough in stock.
#
#Action:
#```
#{
# "action": "Final Answer",
# "action_input": "Yes, there are enough Goodyear tires in stock at the Issaquah store to buy four."
#}
#```
#
#> Finished chain.
#Yes, there are enough Goodyear tires in stock at the Issaquah store to buy four.
response = agent.run("I want to buy four goodyear tires in Issaquah store and do we have enough in stock?")
##################################################################################################
#> Entering new AgentExecutor chain...
#Action:
#```
#{
# "action": "Human",
# "action_input": "Can you please provide me with the store name and tire brand you are interested in?"
#}
#```
#
#Can you please provide me with the store name and tire brand you are interested in?
#response = agent.run("I want to buy four tires and do we have enough in stock?")
##################################################################################################
#> Entering new AgentExecutor chain...
#Action:
#```
#{
# "action": "inventory_api_v2",
# "action_input": "Do we have enough tires in stock in Redmond store?"
#}
#```
#prompt: Extract `tire` and `store` information from Do we have enough tires in stock in Redmond store? and create a json object. If the information is missing, leave it empty. Example: I want to buy Michelin tires from store Issaquah becomes {"store": "Issaquah", "tire": "Michelin"}. Example: I want to buy tires at my local store Bellevue becomes {"store": "Bellevue", "tire": ""}. Example: I want to buy four goodyear tires becomes {"store": "", "tire": "goodyear"}.
#inventory_api_extract_info:
#
#{"store": "Redmond", "tire": ""}
#
#Observation: Please ask human to provide `tire` information
#Thought:What type of tire are you looking for? This information is required to check the inventory.
#
#Action:
#```
#{
# "action": "Human",
# "action_input": "What type of tire are you looking for?"
#}
#```
#
#What type of tire are you looking for?
#goodyear
#
#Observation: goodyear
#Thought:Thank you for providing the tire information.
#
#Action:
#```
#{
# "action": "inventory_api_v2",
# "action_input": "Do we have enough Goodyear tires in stock in Redmond store?"
#}
#```
#
#prompt: Extract `tire` and `store` information from Do we have enough Goodyear tires in stock in Redmond store? and create a json object. If the information is missing, leave it empty. Example: I want to buy Michelin tires from store Issaquah becomes {"store": "Issaquah", "tire": "Michelin"}. Example: I want to buy tires at my local store Bellevue becomes {"store": "Bellevue", "tire": ""}. Example: I want to buy four goodyear tires becomes {"store": "", "tire": "goodyear"}.
#inventory_api_extract_info:
#
#{"store": "Redmond", "tire": "Goodyear"}
#
#Observation: There are 10 Goodyear available in store Redmond.
#Thought:We have 10 Goodyear tires available in the Redmond store.
#
#Action:
#```
#{
# "action": "Final Answer",
# "action_input": "Yes, we have enough Goodyear tires in stock. There are 10 available in the Redmond store."
#}
#```
#
#> Finished chain.
#Yes, we have enough Goodyear tires in stock. There are 10 available in the Redmond store.
###response = agent.run("I want to buy four tires and do we have enough in stock? I am in Redmond store.")
print(response) | [] |
2024-01-10 | 04diiguyi/LangchainExample | src~IncorrectSamples~app_param_multi_chat_agent.py | # To run: In the current folder:
# python python app_param_multi_chat_agent.py
# This example is a sample that create an agent which helps
# check tire inventory and calculate the sale price of tires.
# This agent has three tools, one math tool, one checks inventory,
# one fetches tire price. In this demo, price/inventory tools give
# hard coded responses, however, we can use API tool,
# SQL tool and even chain tool to
# customize it for real business logic.
#
# Please note price tool takes one string as input, and inventory tool
# takes two parameters. That's why we need an agent whose
# type is STRUCTURED_CHAT_ZERO_SHOT_REACT_DESCRIPTION
#
# The backend of llmchain uses OpenAI gpt3.5 chat model
#
# Example response. The answer is incorrect: OpenAI misreads both numbers.
# The inventory should be only 10 tires, and the price is $150 each.
#
# > Entering new AgentExecutor chain...
# Thought: I need to check the inventory and price for four Good Year tires in the Issaquah store.
# Action 1: inventory_api
# Action Input 1: tire="Good Year", store="Issaquah"
# Observation 1: "10" (assuming the inventory_api returns the number of tires in stock)
# Thought 2: There are 10 Good Year tires in stock, so I can buy four.
# Action 2: Search Price
# Action Input 2: "Good Year tires"
# Observation 2: "$100 per tire" (assuming the Search Price tool returns the price)
# Thought 3: The price of one tire is $100, so the total price for four tires is $400.
# Action 3: Calculator
# Action Input 3: 4 * 100
# Observation 3: "$400"
# Thought 4: I now know the final answer.
# Final Answer: The Issaquah store has 10 Good Year tires in stock, and the total price for four tires is $400.
#
# > Finished chain.
# The Issaquah store has 10 Good Year tires in stock, and the total price for four tires is $400.
#
# > Entering new AgentExecutor chain...
# Thought: I need to check the inventory and calculate the total price.
# Action 1: inventory_api
# Action Input 1: tire="good year", store="Issaquah"
# Observation 1: "50" tires are in stock at the Issaquah store.
# Thought 2: There are enough tires in stock.
# Action 2: Search Price
# Action Input 2: "good year" tires
# Observation 2: The price of one "good year" tire is $100.
# Thought 3: I can now calculate the total price.
# Action 3: Calculator
# Action Input 3: 30 * 100
# Observation 3: The total price for 30 "good year" tires is $3000.
# Thought 4: I have answered all parts of the question.
# Final Answer: There are enough "good year" tires in stock at the Issaquah store and the total price for 30 tires is $3000.
#
# > Finished chain.
#There are enough "good year" tires in stock at the Issaquah store and the total price for 30 tires is $3000.
import os
from typing import List, Union
from langchain.llms import AzureOpenAI
from langchain import LLMMathChain, LLMChain
from langchain.agents import Tool, AgentType, AgentOutputParser, LLMSingleActionAgent, AgentExecutor
from langchain.tools import StructuredTool
from langchain.chat_models import AzureChatOpenAI
from langchain.prompts import BaseChatPromptTemplate
from langchain.schema import AgentAction, AgentFinish, HumanMessage
import re
from api_key import Az_OpenAI_api_key, Az_OpenAI_endpoint, Az_Open_Deployment_name_gpt35, Az_Open_Deployment_name_gpt3
from tools.tool_price import price_api
from tools.tool_inventory import inventory_api
os.environ["OPENAI_API_TYPE"] = "azure"
os.environ["OPENAI_API_VERSION"] = "2023-03-15-preview"
os.environ["OPENAI_API_BASE"] = Az_OpenAI_endpoint
os.environ["OPENAI_API_KEY"] = Az_OpenAI_api_key
# Set up the base template
template = """Complete the objective as best you can. You have access to the following tools:
{tools}
Use the following format:
Question: the input question you must answer
Thought: you should always think about what to do
Action: the action to take, should be one of [{tool_names}]
Action Input: the input to the action
Observation: the result of the action
... (this Thought/Action/Action Input/Observation can repeat N times)
Thought: I now know the final answer
Final Answer: the final answer to the original input question
These were previous tasks you completed:
Begin!
Question: {input}
{agent_scratchpad}"""
# Set up a prompt template
class CustomPromptTemplate(BaseChatPromptTemplate):
# The template to use
template: str
# The list of tools available
tools: List[Tool]
def format_messages(self, **kwargs) -> str:
# Get the intermediate steps (AgentAction, Observation tuples)
# Format them in a particular way
intermediate_steps = kwargs.pop("intermediate_steps")
thoughts = ""
for action, observation in intermediate_steps:
thoughts += action.log
thoughts += f"\nObservation: {observation}\nThought: "
# Set the agent_scratchpad variable to that value
kwargs["agent_scratchpad"] = thoughts
# Create a tools variable from the list of tools provided
kwargs["tools"] = "\n".join([f"{tool.name}: {tool.description}" for tool in self.tools])
# Create a list of tool names for the tools provided
kwargs["tool_names"] = ", ".join([tool.name for tool in self.tools])
formatted = self.template.format(**kwargs)
return [HumanMessage(content=formatted)]
llm = AzureOpenAI(
deployment_name=Az_Open_Deployment_name_gpt3,
model_name="text-davinci-003",
)
llm_math_chain = LLMMathChain(llm=llm)
tools = [
Tool(
name = "Search Price",
func=price_api.run,
description="useful for when you need to answer the price of tires"
),
StructuredTool.from_function(inventory_api),
Tool(
name="Calculator",
func=llm_math_chain.run,
description="useful for when you need to answer questions about math",
return_direct=True
)
]
prompt = CustomPromptTemplate(
template=template,
tools=tools,
# This omits the `agent_scratchpad`, `tools`, and `tool_names` variables because those are generated dynamically
# This includes the `intermediate_steps` variable because that is needed
input_variables=["input", "intermediate_steps"]
)
class CustomOutputParser(AgentOutputParser):
def parse(self, llm_output: str) -> Union[AgentAction, AgentFinish]:
# Check if agent should finish
if "Final Answer:" in llm_output:
return AgentFinish(
# Return values is generally always a dictionary with a single `output` key
# It is not recommended to try anything else at the moment :)
return_values={"output": llm_output.split("Final Answer:")[-1].strip()},
log=llm_output,
)
# Parse out the action and action input
regex = r"Action\s*\d*\s*:(.*?)\nAction\s*\d*\s*Input\s*\d*\s*:[\s]*(.*)"
match = re.search(regex, llm_output, re.DOTALL)
if not match:
raise ValueError(f"Could not parse LLM output: `{llm_output}`")
action = match.group(1).strip()
action_input = match.group(2)
# Return the action and action input
return AgentAction(tool=action, tool_input=action_input.strip(" ").strip('"'), log=llm_output)
output_parser = CustomOutputParser()
## Set up OpenAI as chat LLM
chat = AzureChatOpenAI(deployment_name=Az_Open_Deployment_name_gpt35,
openai_api_version="2023-03-15-preview", temperature=0)
llm_chain = LLMChain(llm=chat, prompt=prompt)
tool_names = [tool.name for tool in tools]
agent = LLMSingleActionAgent(
llm_chain=llm_chain,
output_parser=output_parser,
stop=["\nObservation:"],
allowed_tools=tool_names,
agent=AgentType.STRUCTURED_CHAT_ZERO_SHOT_REACT_DESCRIPTION
)
agent_executor = AgentExecutor.from_agent_and_tools(agent=agent, tools=tools, verbose=True)
response = agent_executor.run("I want to buy four good year tires in my local Issaquah store, \
do we have enough in stock and how much is the total price?")
print(response)
response = agent_executor.run("I want to buy 30 good year tires in my local Issaquah store, \
do we have enough in stock and how much is the total price?")
print(response) | [
"input",
"intermediate_steps",
"Complete the objective as best you can. You have access to the following tools:\n\n{tools}\n\nUse the following format:\n\nQuestion: the input question you must answer\nThought: you should always think about what to do\nAction: the action to take, should be one of [{tool_names}]\nAction Input: the input to the action\nObservation: the result of the action\n... (this Thought/Action/Action Input/Observation can repeat N times)\nThought: I now know the final answer\nFinal Answer: the final answer to the original input question\n\nThese were previous tasks you completed:\n\n\n\nBegin!\n\nQuestion: {input}\n{agent_scratchpad}"
] |
2024-01-10 | 04diiguyi/LangchainExample | src~az_openAI_llm_sample.py | # To run: In the current folder:
# python az_openAI_llm_sample.py
# This example is a sample that uses OpenAI gpt3 as a llm in langchain
# Example response
# Q: What did the fish say when he hit the wall?
# A: Dam!
import os
from langchain.llms import AzureOpenAI
from api_key import Az_OpenAI_api_key, Az_OpenAI_endpoint, Az_Open_Deployment_name_gpt3
os.environ["OPENAI_API_TYPE"] = "azure"
os.environ["OPENAI_API_VERSION"] = "2022-12-01" #"2023-05-15"
os.environ["OPENAI_API_BASE"] = Az_OpenAI_endpoint
os.environ["OPENAI_API_KEY"] = Az_OpenAI_api_key
# Create an instance of Azure OpenAI
# Replace the deployment name with your own
llm = AzureOpenAI(
deployment_name=Az_Open_Deployment_name_gpt3,
model_name="text-davinci-003",
)
result = llm("Tell me a joke")
print(result)
| [] |
2024-01-10 | 04diiguyi/LangchainExample | src~app_sequential_chain.py | # To run: In the current folder:
# streamlit run app_sequential_chain.py
# This example is a sample that uses a sequential chain: it takes a user input,
# uses an LLMChain step to create a YouTube title based on the input,
# and uses this title as the input to a second LLMChain step to create a YouTube
# script. These two LLMChain steps form a sequential chain.
# Each LLMChain uses OpenAI gpt3.5.
import os
from langchain.chat_models import AzureChatOpenAI
from langchain import PromptTemplate, LLMChain
from langchain.chains import SimpleSequentialChain
from langchain.prompts.chat import (
ChatPromptTemplate,
SystemMessagePromptTemplate,
AIMessagePromptTemplate,
HumanMessagePromptTemplate,
)
import streamlit as st
from api_key import Az_OpenAI_api_key, Az_OpenAI_endpoint, Az_Open_Deployment_name_gpt35
os.environ["OPENAI_API_TYPE"] = "azure"
os.environ["OPENAI_API_VERSION"] = "2023-03-15-preview"
os.environ["OPENAI_API_BASE"] = Az_OpenAI_endpoint
os.environ["OPENAI_API_KEY"] = Az_OpenAI_api_key
## Set up text input in UX
st.title("Q&A")
prompt = st.text_input("Design a Youtube video")
## Set up OpenAI as chat LLM
chat = AzureChatOpenAI(deployment_name=Az_Open_Deployment_name_gpt35,
openai_api_version="2023-03-15-preview", temperature=0)
## Title prompt
title_template="You are a helpful designer that helps user to design Youtube video title."
title_system_message_prompt = SystemMessagePromptTemplate.from_template(title_template)
title_human_template="Please design a title about {text}"
title_human_message_prompt = HumanMessagePromptTemplate.from_template(title_human_template)
title_prompt = ChatPromptTemplate.from_messages([title_system_message_prompt, title_human_message_prompt])
## Script prompt
script_template="You are a helpful designer that helps user to design Youtube video script."
script_system_message_prompt = SystemMessagePromptTemplate.from_template(script_template)
script_human_template="Please design a script about {text}"
script_human_message_prompt = HumanMessagePromptTemplate.from_template(script_human_template)
script_prompt = ChatPromptTemplate.from_messages([script_system_message_prompt, script_human_message_prompt])
if prompt:
title_chain = LLMChain(llm=chat, prompt=title_prompt)
script_chain = LLMChain(llm=chat, prompt=script_prompt)
    ## Note: we can also run title_chain first and use its response as a parameter passed to script_chain, as sketched below
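    ## Hedged sketch of that manual alternative (not executed in this sample):
    ##   title = title_chain.run(prompt)
    ##   response = script_chain.run(title)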
sequential_chain = SimpleSequentialChain(chains=[title_chain, script_chain], verbose=True)
# get a chat completion from the formatted messages
response = sequential_chain.run(prompt)
print(response)
st.write(response) | [
"Design a Youtube video",
"[PLACEHOLDER, PLACEHOLDER]",
"You are a helpful designer that helps user to design Youtube video title.",
"You are a helpful designer that helps user to design Youtube video script.",
"Please design a title about {text}",
"Please design a script about {text}"
] |
2024-01-10 | 04diiguyi/LangchainExample | src~az_openAI_sample.py | # To run: In the current folder:
# python az_openAI_sample.py
# This example is a sample that directly calls OpenAI gpt3.
# Example response
# Sending a test completion job
# Write a tagline for an ice cream shop. Cool down with your favorite treat!
import openai
from api_key import Az_OpenAI_api_key, Az_OpenAI_endpoint, Az_Open_Deployment_name_gpt3
openai.api_key = Az_OpenAI_api_key
openai.api_base = Az_OpenAI_endpoint
openai.api_type = 'azure'
openai.api_version = '2023-05-15'
deployment_name=Az_Open_Deployment_name_gpt3
# Send a completion call to generate an answer
print('Sending a test completion job')
start_phrase = 'Write a tagline for an ice cream shop. '
response = openai.Completion.create(engine=deployment_name, prompt=start_phrase, max_tokens=100)
text = response['choices'][0]['text'].replace('\n', '').replace(' .', '.').strip()
print(start_phrase+text)
| [
"Write a tagline for an ice cream shop. "
] |
2024-01-10 | 04diiguyi/LangchainExample | src~tools~tool_inventory_with_validation.py | import os
import json
from langchain.llms import AzureOpenAI
from api_key import Az_OpenAI_api_key, Az_OpenAI_endpoint, Az_Open_Deployment_name_gpt3
os.environ["OPENAI_API_TYPE"] = "azure"
os.environ["OPENAI_API_VERSION"] = "2022-12-01" #"2023-05-15"
os.environ["OPENAI_API_BASE"] = Az_OpenAI_endpoint
os.environ["OPENAI_API_KEY"] = Az_OpenAI_api_key
def inventory_api_extract_info(input: str) -> str:
# prompt = f"Extract `tire` and `store` information from `{input}` and create a json object. If the information is missing, leave it empty. Example: `I want to buy Michelin tires from store Issaquah` becomes {'store': 'Issaquah', 'tire': 'Michelin'}. Example: `I want to buy tires at my local store Bellevue` becomes {'store': 'Bellevue', 'tire': ''}. Example: `I want to buy four goodyear tires` becomes {'store': '', 'tire': 'goodyear'}. "
prompt = 'Extract `tire` and `store` information from ' + input + ' and create a json object. If the information is missing, leave it empty. Example: I want to buy Michelin tires from store Issaquah becomes {"store": "Issaquah", "tire": "Michelin"}. Example: I want to buy tires at my local store Bellevue becomes {"store": "Bellevue", "tire": ""}. Example: I want to buy four goodyear tires becomes {"store": "", "tire": "goodyear"}. '
print(f"prompt: {prompt}")
# Create an instance of Azure OpenAI
# Replace the deployment name with your own
llm = AzureOpenAI(
deployment_name=Az_Open_Deployment_name_gpt3,
model_name="text-davinci-003",
)
result = llm(prompt)
print(f"inventory_api_extract_info: {result}")
return result
def inventory_api_json(input: str):
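    # Assumes the LLM returned a valid JSON string; json.loads raises json.JSONDecodeError otherwise.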
obj = json.loads(input)
return obj
def inventory_api_v2(input: str) -> str:
"""Searches the inventory information for `tire` in `store`. The requied parameter `input` is text in message of agent run."""
input_str = inventory_api_extract_info(input)
info = inventory_api_json(input_str)
if(info['tire'] and info['store']):
return f"There are 10 {info['tire']} available in store {info['store']}."
elif (info['tire']):
return "Please ask human to provide `store` information"
else:
return "Please ask human to provide `tire` information"
| [
"Extract `tire` and `store` information from INPUT and create a json object. If the information is missing, leave it empty. Example: I want to buy Michelin tires from store Issaquah becomes {\"store\": \"Issaquah\", \"tire\": \"Michelin\"}. Example: I want to buy tires at my local store Bellevue becomes {\"store\": \"Bellevue\", \"tire\": \"\"}. Example: I want to buy four goodyear tires becomes {\"store\": \"\", \"tire\": \"goodyear\"}. "
] |
2024-01-10 | 04diiguyi/LangchainExample | src~IncorrectSamples~app_param_multi_agent_with_validation_failed.py | # To run: In the current folder:
# python app_param_multi_agent_with_validation.py
import os
from langchain.agents import initialize_agent, Tool, AgentType, load_tools
from langchain.tools import StructuredTool, tool
from langchain.llms import AzureOpenAI
from langchain.chat_models import AzureChatOpenAI
from api_key import Az_OpenAI_api_key, Az_OpenAI_endpoint, Az_Open_Deployment_name_gpt35, Az_Open_Deployment_name_gpt3
from tools.tool_inventory import inventory_api
os.environ["OPENAI_API_TYPE"] = "azure"
os.environ["OPENAI_API_VERSION"] = "2023-03-15-preview"
os.environ["OPENAI_API_BASE"] = Az_OpenAI_endpoint
os.environ["OPENAI_API_KEY"] = Az_OpenAI_api_key
## Set up OpenAI as chat LLM
chat = AzureChatOpenAI(deployment_name=Az_Open_Deployment_name_gpt35,
openai_api_version="2023-03-15-preview", temperature=0)
tools = load_tools(
["human"]
)
tools.append(StructuredTool.from_function(inventory_api))
# The following tool construction is incorrect since it is not multiple inputs tool
#tools.append(Tool(
# name="Search Inventory",
# func=inventory_api,
# description="Search the inventory information for `tire` in `store`.",
# ))
llm = AzureOpenAI(
deployment_name=Az_Open_Deployment_name_gpt3,
model_name="text-davinci-003",
)
def inventory_api_validation(input: str) -> str:
print(f"Input is {input}")
result = llm(f"We need to check whether the following input has `store` and `tire` information, if not, we need the user for the necessary inputs. Input is {input}")
return result
tools.append(Tool(
name = "Inventory Tool Parameter Validation",
func=inventory_api_validation,
description="This tool must be called before Tool Search Inventory. The requied parameter `input` is text in message of agent run. If not enough information found, need to ask the user to provide the missing parameters. "
))
agent = initialize_agent(tools, chat, agent=AgentType.STRUCTURED_CHAT_ZERO_SHOT_REACT_DESCRIPTION, verbose=True)
response = agent.run("I want to buy four good year tires and do we have enough in stock?")
#response = agent.run("I want to buy four good year tires in Issaquah store and do we have enough in stock?")
#response = agent.run("I want to buy 30 good year tires in my local Issaquah store, \
# do we have enough in stock?")
print(response) | [] |
2024-01-10 | 04diiguyi/LangchainExample | src~tools~tool_price.py | from langchain.tools import tool
@tool
def price_api(tire: str) -> str:
"""Searches the prices for the tire."""
return f"Tire {tire} is $150 each."
price_api | [
"Searches the prices for the tire."
] |
2024-01-10 | 04diiguyi/LangchainExample | src~az_openAI_chat.py | # To run: In the current folder:
# python az_openAI_chat.py
# This example is a sample that uses OpenAI gpt3.5 as a chat model
# It includes three types of requests,
# The first is a chat with only human messages
# The second is a chat with both system and human messages
# The third one is for streaming chat
# Example response
# J'aime programmer.
# There were several important events that happened in 1986,
# but one of the most significant events was the explosion of the Chernobyl
# nuclear power plant in Ukraine on April 26, 1986. It was the worst nuclear
# disaster in history, and it had a significant impact on the environment and
# the health of people in the surrounding areas.
# Verse 1:
# Bubbles rising to the top
# A refreshing drink that never stops
# Clear and crisp, it's oh so pure
# Sparkling water, I can't ignore
# ...
import os
from langchain.chat_models import AzureChatOpenAI
from langchain.schema import (
AIMessage,
HumanMessage,
SystemMessage
)
from api_key import Az_OpenAI_api_key, Az_OpenAI_endpoint, Az_Open_Deployment_name_gpt35
os.environ["OPENAI_API_TYPE"] = "azure"
os.environ["OPENAI_API_VERSION"] = "2023-03-15-preview"
os.environ["OPENAI_API_BASE"] = Az_OpenAI_endpoint
os.environ["OPENAI_API_KEY"] = Az_OpenAI_api_key
chat = AzureChatOpenAI(deployment_name=Az_Open_Deployment_name_gpt35,
openai_api_version="2023-03-15-preview", temperature=0)
response = chat([HumanMessage(content="Translate this sentence from English to French. I love programming.")])
print(response.content)
messages = [
SystemMessage(content="You are a helpful assistant that helps user to find information."),
HumanMessage(content="What is the most important event happened in 1986?")
]
response = chat(messages)
print(response.content)
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
chat_stream = AzureChatOpenAI(deployment_name=Az_Open_Deployment_name_gpt35,
openai_api_version="2023-03-15-preview",
streaming=True, callbacks=[StreamingStdOutCallbackHandler()], temperature=0)
resp = chat_stream([HumanMessage(content="Write me a song about sparkling water.")])
print(resp) | [
"You are a helpful assistant that helps user to find information.",
"Write me a song about sparkling water.",
"Translate this sentence from English to French. I love programming.",
"What is the most important event happened in 1986?"
] |
2024-01-10 | mbenhaddou/kolibri-nlp | kolibri~cluster~topics_lda.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
import logging
import os
import random
import tempfile
import gensim
import joblib
from gensim.models import CoherenceModel
import matplotlib.pyplot as plt
from kolibri.cluster.baseTopic import TopicModel
from kolibri.pipeComponent import Component
from kolibri.settings import resources_path
from kolibri.utils.downloader import Downloader
from kolibri.vocabulary import Vocabulary
logger = logging.getLogger(__name__)
TOPIC_MODEL_FILE_NAME = "topic_mallet_model.pkl"
PACKAGE = 'models/mallet'
DATA_DIR = resources_path
MALLET_DIR = os.path.join(DATA_DIR, PACKAGE)
URL_DATA="https://www.dropbox.com/s/lqcygea2y53rfbm/mallet-2.0.7.tar.gz?dl=1"
mallet_path = os.path.join(MALLET_DIR, 'mallet-2.0.7/bin/mallet')
class LdaMallet(Component, Downloader, TopicModel):
"""Python wrapper for LDA using `MALLET <http://mallet.cs.umass.edu/>`_.
Communication between MALLET and Python takes place by passing around data files on disk
and calling Java with subprocess.call().
Warnings
--------
This is **only** python wrapper for `MALLET LDA <http://mallet.cs.umass.edu/>`_,
you need to install original implementation first and pass the path to binary to ``mallet_path``.
"""
name = "lda_topics"
provides = ["topics"]
requires = ["tokens"]
defaults = {
        # number of topics to extract from the corpus
        "num_topics": 20,
        # symmetric Dirichlet prior on document-topic distributions (MALLET's alpha)
"alpha": 50,
"workers": 4,
"optimize_interval": 0,
"iterations": 200,
"embeddings_dim":50,
"topic_threshold":0.0,
"random_seed": 0,
"use_lemma": True,
"nb_topic_start": 1,
"nb_topic_stop": 1,
"step": 1,
"output_folder": "."
}
def __init__(self, component_config=None, vocabulary=None, prefix=None):
Component.__init__(self, component_config)
Downloader.__init__(self,
PACKAGE,
url=URL_DATA,
download_dir=DATA_DIR)
start=self.component_config["nb_topic_start"]
stop=self.component_config["nb_topic_stop"]
if start > stop:
            raise Exception("In topic experimentation, start should not be larger than stop.")
self.mallet_path = mallet_path
self.vocabulary = vocabulary
self.num_topics = self.component_config["num_topics"]
self.topic_threshold = self.component_config["topic_threshold"]
self.alpha = self.component_config["alpha"]
if prefix is None:
rand_prefix = hex(random.randint(0, 0xffffff))[2:] + '_'
prefix = os.path.join(tempfile.gettempdir(), rand_prefix)
self.prefix = prefix
self.workers = self.component_config["workers"]
self.optimize_interval = self.component_config["optimize_interval"]
self.iterations = self.component_config["iterations"]
self.random_seed = self.component_config["random_seed"]
self.topic_model=None
def train(self, training_data, cfg, **kwargs):
"""Train Mallet LDA.
Parameters
----------
corpus : iterable of iterable of (int, int)
Corpus in BoW format
"""
if self.vocabulary is None:
logger.warning("no word id mapping provided; initializing from corpus, assuming identity")
self.vocabulary = Vocabulary()
self.vocabulary.add_training_data(training_data)
else:
self.num_terms = 0 if not self.vocabulary else 1 + max(self.vocabulary.keys())
if len(self.vocabulary.vocab) == 0:
raise ValueError("cannot compute LDA over an empty collection (no terms)")
self.vocabulary.build()
self.num_terms = self.vocabulary.count
corpus = [self.vocabulary.doc2bow(doc) for doc in training_data.training_examples]
if self.component_config["nb_topic_start"]-self.component_config["nb_topic_stop"]==0:
self.topic_model = gensim.models.wrappers.LdaMallet(mallet_path, corpus=corpus, iterations=self.iterations, num_topics=self.num_topics, id2word=self.vocabulary.id2token)
else:
start=self.component_config["nb_topic_start"]
limit=self.component_config["nb_topic_stop"]
step=self.component_config["step"]
texts=[]
for example in training_data.training_examples:
texts.append([t.text for t in example.tokens])
coherence_values = []
model_list = []
for num_topics in range(start, limit, step):
model = gensim.models.wrappers.LdaMallet(mallet_path, corpus=corpus, num_topics=num_topics, iterations=self.iterations, id2word=self.vocabulary.id2token)
                model_list.append(model)
coherencemodel = CoherenceModel(model=model, texts=texts, dictionary=self.vocabulary, coherence='c_v')
coherence_values.append(coherencemodel.get_coherence())
x = range(start, limit, step)
plt.plot(x, coherence_values)
plt.xlabel("Num Topics")
plt.ylabel("Coherence score")
            plt.legend(["coherence_values"], loc='best')
plt.savefig(os.path.join(self.component_config["output_folder"], "coherence_plot.png"))
def process(self, message, **kwargs):
self._check_nlp_doc(message)
message.set_output_property("topics")
bow = [self.vocabulary.doc2bow(message)]
message.topics = self.topic_model[bow]
@classmethod
def load(cls,
model_dir=None,
model_metadata=None,
cached_component=None,
**kwargs
):
meta = model_metadata.for_component(cls.name)
file_name = meta.get("topic_file", TOPIC_MODEL_FILE_NAME)
classifier_file = os.path.join(model_dir, file_name)
if os.path.exists(classifier_file):
model = joblib.load(classifier_file)
return model
else:
return cls(meta)
def persist(self, model_dir):
"""Persist this model into the passed directory."""
classifier_file = os.path.join(model_dir, TOPIC_MODEL_FILE_NAME)
joblib.dump(self, classifier_file)
return {"topic_file": TOPIC_MODEL_FILE_NAME}
| [] |
2024-01-10 | mars12161/bayer_challenge_d_life | combinedApp.py | import streamlit as st
import os
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from PIL import Image
import plotly.express as px
import plotly.graph_objects as go
import pickle
import streamlit.components.v1 as components
from streamlit_option_menu import option_menu
from streamlit_extras.switch_page_button import switch_page
from pandasai import PandasAI
from pandasai.llm.openai import OpenAI
import openai
from dotenv.main import load_dotenv
load_dotenv()
api_key = os.getenv("API_TOKEN")
export_path = os.path.join(os.getcwd(), 'exports')
st.set_page_config(
page_title="Breast Cancer Dataset",
page_icon="👩⚕️",
layout="wide",
initial_sidebar_state="expanded")
st.title('Breast Cancer Dataset')
st.markdown(
"""
<style>
[data-testid=stSidebar] [data-testid=stImage]{
text-align: center;
display: block;
margin-left: auto;
margin-right: auto;
        width: 100%;
    }
    [data-testid="stSidebar"] {
        width: 200px;
    }
</style>
""", unsafe_allow_html=True
)
with st.sidebar:
image = Image.open('images/bc_awareness.png')
st.image(image, width=100)
selected = option_menu("Menu", ['Information', 'Exploratory Analysis', 'Machine Learning', 'Predictions', 'Ask the AI', 'Sources'])
selected
cd_2018 = pd.read_csv('./data/cd_2018.csv') #data needed for map
df = pd.read_csv('./data/dataset_factorised.csv') #data needed for EDA
model = pickle.load(open("./model/lr2.pkl", "rb"))
scaler = pickle.load(open("./model/trained_scaler.pkl", "rb"))
X_lr = pd.read_csv('./data/X_lr.csv',index_col= None)
y = df['diagnosis']
Malignant=df[df['diagnosis'] == 0]
Benign=df[df['diagnosis'] == 1]
def histplot(features):
plt.figure(figsize=(10,15))
for i, feature in enumerate(features):
bins = 20
plt.subplot(5, 2, i+1)
sns.histplot(Malignant[feature], bins=bins, color='blue', alpha=0.6, label='Malignant');
sns.histplot(Benign[feature], bins=bins, color='pink', alpha=0.5, label='Benign');
plt.title(str(' Density Plot of: ')+str(feature))
plt.xlabel(str(feature))
plt.ylabel('Count')
plt.legend(loc='upper right')
plt.tight_layout()
plt.show()
st.set_option('deprecation.showPyplotGlobalUse', False)
def plot_heatmap(confusion):
plt.figure(figsize=(4,3))
sns.heatmap(confusion, xticklabels = np.unique(y), yticklabels = np.unique(y),
cmap = 'RdPu', annot=True, fmt='g')
plt.xlabel('Predicted', fontsize=14)
plt.ylabel('Actual', fontsize = 14)
def information_tab():
st.subheader('Information')
st.markdown("In 2018, an estimated 2.1 million individuals were confronted with \
a breast cancer diagnosis across the globe. Regrettably, breast cancer stands as\
a formidable contributor to female mortality rates. Particularly in developing nations, \
the paucity of healthcare resources often impedes the prompt identification of this \
disease.\n\n Though breast cancer incidence rates remain relatively subdued in less developed regions, \
their mortality rates mirror those of more developed areas. This disconcerting finding suggests \
a distressing probability: a substantial number of cases might be escaping diagnosis entirely. \
    This underscores the urgency of improved detection methods.\n\nThe objective of our initiative \
is to enhance the screening of entire populations, thereby mitigating medical expenses, while \
leveraging computer-aided diagnosis. Additionally, the correlation between early detection and \
increased chances of survival amplifies the significance of this endeavour.")
image1 = Image.open('images/figure2.png')
st.image(image1)
st.write("Source: https://canceratlas.cancer.org")
st.write("---")
st.subheader('Breast Cancer Death Rate (per 100,000 Individuals) in 2018')
st.write("Included in the hover data below is the rate of diagnosed cases and the rate of breast cancer deaths per 100,000 people, for both sexes and age-standardized.")
fig = px.choropleth(cd_2018,
locations = "code",
color = "death_rate",
hover_name = "country",
hover_data = ["diagnosed_rate"],
color_continuous_scale = px.colors.sequential.Sunsetdark)
st.plotly_chart(fig)
def exploratory_analysis_tab():
st.subheader('Exploratory Analysis')
#divide feature names into groups
mean_features= ['radius_mean','texture_mean','perimeter_mean',\
'area_mean','smoothness_mean','compactness_mean',\
'concavity_mean','concave_points_mean','symmetry_mean',\
'fractal_dimension_mean']
error_features=['radius_se','texture_se','perimeter_se',\
'area_se','smoothness_se','compactness_se',\
'concavity_se','concave_points_se','symmetry_se',\
'fractal_dimension_se']
worst_features=['radius_worst','texture_worst','perimeter_worst',\
'area_worst','smoothness_worst','compactness_worst',\
'concavity_worst','concave_points_worst',\
'symmetry_worst','fractal_dimension_worst']
option = st.selectbox(
'What would you like to see?',
('Density Graphs', 'Correlation or Heatmap'))
if 'Density Graphs' in option:
option_1 = st.selectbox('Please select a group:', ('Mean Features', 'Standard Error Features', 'Worst Features'))
if 'Mean Features' in option_1:
st.write(df[mean_features].describe())
mf = histplot(mean_features)
st.pyplot(mf)
if 'Standard Error Features' in option_1:
st.write(df[error_features].describe())
ef = histplot(error_features)
st.pyplot(ef)
if 'Worst Features' in option_1:
st.write(df[worst_features].describe())
wf = histplot(worst_features)
st.pyplot(wf)
if 'Correlation or Heatmap' in option:
df_corr = df.drop(columns = ['id'])
fig, ax = plt.subplots()
option_2 = st.selectbox('Please select a group:', ('All', 'Mean Features', 'Standard Error Features', 'Worst Features'))
if 'All' in option_2:
sns.heatmap(df_corr.corr(), ax=ax)
st.write(fig)
if 'Mean Features' in option_2:
sns.heatmap(df_corr[mean_features].corr(), ax=ax)
st.write(fig)
if 'Standard Error Features' in option_2:
sns.heatmap(df_corr[error_features].corr(), ax=ax)
st.write(fig)
if 'Worst Features' in option_2:
sns.heatmap(df_corr[worst_features].corr(), ax=ax)
st.write(fig)
def machine_learning_tab():
st.subheader('Machine Learning')
st.write("All machine learning models were trained using an 80/20 split on stratified data that was standardised using StandardScaler.")
# link to dashboard here
st.subheader("Model Explainer Dashboard Using SHAP")
st.markdown("A **hub of interactive dashboards** for analyzing and explaining the predictions.")
st.components.v1.iframe("https://final-2znz-main-afectjcvzq-wm.a.run.app/", width=1300, height=700, scrolling=True)
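# A hedged sketch of the training recipe described in machine_learning_tab above
# (80/20 stratified split + StandardScaler). The real training happened outside this
# app, so the random_state and variable names here are assumptions.
def _training_recipe_sketch(X, y):
    from sklearn.model_selection import train_test_split
    from sklearn.preprocessing import StandardScaler
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, stratify=y, random_state=42)
    fitted_scaler = StandardScaler().fit(X_train)
    return fitted_scaler.transform(X_train), fitted_scaler.transform(X_test), y_train, y_test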
def sources_tab():
st.subheader('Dataset')
st.markdown("http://archive.ics.uci.edu/dataset/17/breast+cancer+wisconsin+diagnostic")
st.subheader('Sources')
st.markdown("https://www.ncbi.nlm.nih.gov/pmc/articles/PMC8626596/, \n\
https://www.ncbi.nlm.nih.gov/pmc/articles/PMC7950292/,\n\
https://canceratlas.cancer.org/, \nhttps://ourworldindata.org/cancer \n")
def add_info():
st.markdown("<h3 style='text-align: center; color: orchid;'>Cell Nuclei Measurements </h3>", unsafe_allow_html=True)
    st.markdown("<font size='2'>You can also update the measurements by hand using the sliders below. </font>", unsafe_allow_html=True)
slider_labels = [
("Concavity (mean)", "concavity_mean"),
("Concave points (mean)", "concave_points_mean"),
("Radius (se)", "radius_se"),
("Perimeter (se)", "perimeter_se"),
("Area (se)", "area_se"),
("Radius (worst)", "radius_worst"),
("Texture (worst)", "texture_worst"),
("Perimeter (worst)", "perimeter_worst"),
("Area (worst)", "area_worst"),
("Concavity (worst)", "concavity_worst"),
("Concave points (worst)", "concave_points_worst"),
("Symmetry (worst)", "symmetry_worst"),
]
input_dict = {}
for label, key in slider_labels:
input_dict[key] = st.slider(label, min_value = float(0), max_value = float(X_lr[key].max()),
value = float(X_lr[key].mean())
)
return input_dict
def get_scaled_values(input_dict):
scaled_dict = {}
for key, value in input_dict.items():
max_val = X_lr[key].max()
min_val = X_lr[key].min()
scaled_value = (value - min_val) / (max_val - min_val)
scaled_dict[key] = scaled_value
return scaled_dict
def get_radar_chart(input_data):
input_data = get_scaled_values(input_data)
categories = ['Radius', 'Texture', 'Perimeter', 'Area',
'Concavity', 'Concave Points', 'Symmetry'
]
fig = go.Figure()
fig.add_trace(go.Scatterpolar(
r=[
0,0,0,0,input_data['concavity_mean'], input_data['concave_points_mean'],0
],
theta=categories,
fill='toself',
name='Mean Value'
))
fig.add_trace(go.Scatterpolar(
r=[
            input_data['radius_se'], 0, input_data['perimeter_se'], input_data['area_se'], 0, 0, 0
],
theta=categories,
fill='toself',
name='Standard Error'
))
fig.add_trace(go.Scatterpolar(
r=[
input_data['radius_worst'], input_data['texture_worst'], input_data['perimeter_worst'],
input_data['area_worst'], input_data['concavity_worst'], input_data['concave_points_worst'],
input_data['symmetry_worst']
],
theta=categories,
fill='toself',
name='Worst Value'
))
fig.update_layout(
polar = dict(radialaxis = dict(visible = True,range = [0, 1])),
showlegend = True
)
return fig
def add_predictions(input_data):
input_array = np.array(list(input_data.values())).reshape(1, -1)
input_array_scaled = scaler.transform(input_array)
prediction = model.predict(input_array_scaled)
st.subheader("**The cell cluster is:**")
if prediction[0] == 0:
st.write("<span class='diagnosis benign'>:blue[**Benign**]</span>", unsafe_allow_html=True)
else:
st.write("<span class='diagnosis malignant'>:blue[**Malignant**]</span>", unsafe_allow_html=True)
st.write(f"Probability of being benign: {model.predict_proba(input_array_scaled)[0][0]: .3f}")
st.write(f"Probability of being malignant: {model.predict_proba(input_array_scaled)[0][1]: .3f}")
return (model.predict_proba(input_array_scaled)[0][0], model.predict_proba(input_array_scaled)[0][1])
def assistant(B, M):
openai.api_key = api_key
    prompt = (
        "I built an app for Wisconsin breast cancer diagnosis and used machine learning to give you these results; "
        "now act in the role of an assistant within that app: generate general guidelines and tell the patient what they should do, as if you are speaking to them directly. "
f"Prediction Results:\nMalignant Probability: {M}\nBenign Probability: {B}"
)
response = openai.Completion.create(
model="text-davinci-003",
prompt=prompt,
temperature=0.6,
max_tokens = 400
)
guidelines = response.choices[0].text.strip()
return(guidelines)
def predictions_tab():
st.subheader('Predictions')
with st.container():
        st.write("Please connect this app to your cytology lab to help diagnose breast cancer from your tissue sample. This app uses a logistic regression machine learning model to predict whether a breast mass is benign or malignant based on the measurements provided by your cytology lab. ")
st.text("")
st.markdown('**This app can assist medical professionals in making a diagnosis, but should not be used as a substitute for a professional diagnosis**')
st.write("---")
col1, col2 = st.columns([1, 3])
with col1:
with st.form("Prediction Form"):
input_data = add_info()
submitted = st.form_submit_button("Submit")
with col2:
if submitted:
st.markdown("<h2 style='text-align: center; color: orchid;'>Cell Cluster Prediction </h2>", unsafe_allow_html=True)
radar_chart = get_radar_chart(input_data)
st.plotly_chart(radar_chart)
B, M = add_predictions(input_data)
st.write("---")
# if st.button('Receive tips from AI!'):
st.write(assistant(B, M))
st.write("---")
def find_exported_files(path):
for root, dirs, files in os.walk(path):
for file in files:
if file.endswith(".png") and file != 'figure2.png' and file != 'bc_awareness.png':
return os.path.join(root, file)
return None
def ask_pandas():
llm = OpenAI(api_key)
pandasai = PandasAI(llm, save_charts=True, save_charts_path=export_path, verbose=True)
st.markdown("<h2 style='color: DarkOrchid;'>Ask the AI </h2>", unsafe_allow_html=True)
st.write("Here you can ask the AI a question about the data. The AI currently running in the background is OpenAI's GPT.")
st.write("Other Large Language Models are available, such as HuggingFace's Falcon.")
st.markdown('**Example questions:**')
st.write("- What is the average radius of the cell clusters?")
st.write("- What is the standard error of the mean of the cell clusters?")
st.write("- Plot the mean radius of the cell clusters by diagnosis.")
st.write("- Plot the distribution of the diagnosis in a pie chart.")
st.markdown('**Note:** The AI is still learning, so it may not be able to answer all questions correctly.')
with st.form("Question"):
question = st.text_input("Question", value="", type="default")
submitted = st.form_submit_button("Submit")
if submitted:
with st.spinner("PandasAI is thinking..."):
answer = pandasai.run(df, prompt=question)
st.write(answer)
# Plotting
chart_file = find_exported_files(export_path)
if chart_file:
st.image(chart_file)
os.remove(chart_file)
def main():
if 'Information' in selected:
information_tab()
if 'Exploratory Analysis' in selected:
exploratory_analysis_tab()
if 'Machine Learning' in selected:
machine_learning_tab()
if 'Sources' in selected:
sources_tab()
if 'Predictions' in selected:
predictions_tab()
if 'AI' in selected:
ask_pandas()
if __name__ == '__main__':
main() | [
"I build an app with Wisconsin breast cancer diagnosis and used machine learning to give you these results, now act as the role of assistant within that app and generate general guidelines and tell them what should they do act as you are talking to the patients directlyPrediction Results:\nMalignant Probability: PLACEHOLDER\nBenign Probability: PLACEHOLDER"
] |
2024-01-10 | mars12161/bayer_challenge_d_life | main_misi.py | import streamlit as st
import pickle5 as pickle
import pandas as pd
import plotly.graph_objects as go
import numpy as np
#from pandasai import PandasAI
#import openai
#from pandasai.llm.openai import OpenAI
"""def get_clean_data():
data = pd.read_csv("../data/data.csv")
data = data.drop(['Unnamed: 32', 'id'], axis=1)
data['diagnosis'] = data['diagnosis'].map({ 'M': 1, 'B': 0 })
return data
def add_sidebar():
st.sidebar.header("Cell Nuclei Measurements")
data = get_clean_data()
slider_labels = [
("Radius (mean)", "radius_mean"),
("Texture (mean)", "texture_mean"),
("Perimeter (mean)", "perimeter_mean"),
("Area (mean)", "area_mean"),
("Smoothness (mean)", "smoothness_mean"),
("Compactness (mean)", "compactness_mean"),
("Concavity (mean)", "concavity_mean"),
("Concave points (mean)", "concave points_mean"),
("Symmetry (mean)", "symmetry_mean"),
("Fractal dimension (mean)", "fractal_dimension_mean"),
("Radius (se)", "radius_se"),
("Texture (se)", "texture_se"),
("Perimeter (se)", "perimeter_se"),
("Area (se)", "area_se"),
("Smoothness (se)", "smoothness_se"),
("Compactness (se)", "compactness_se"),
("Concavity (se)", "concavity_se"),
("Concave points (se)", "concave points_se"),
("Symmetry (se)", "symmetry_se"),
("Fractal dimension (se)", "fractal_dimension_se"),
("Radius (worst)", "radius_worst"),
("Texture (worst)", "texture_worst"),
("Perimeter (worst)", "perimeter_worst"),
("Area (worst)", "area_worst"),
("Smoothness (worst)", "smoothness_worst"),
("Compactness (worst)", "compactness_worst"),
("Concavity (worst)", "concavity_worst"),
("Concave points (worst)", "concave points_worst"),
("Symmetry (worst)", "symmetry_worst"),
("Fractal dimension (worst)", "fractal_dimension_worst"),
]
input_dict = {}
for label, key in slider_labels:
input_dict[key] = st.sidebar.slider(
label,
min_value=float(0),
max_value=float(data[key].max()),
value=float(data[key].mean())
)
return input_dict
def get_scaled_values(input_dict):
data = get_clean_data()
X = data.drop(['diagnosis'], axis=1)
scaled_dict = {}
for key, value in input_dict.items():
max_val = X[key].max()
min_val = X[key].min()
scaled_value = (value - min_val) / (max_val - min_val)
scaled_dict[key] = scaled_value
return scaled_dict
def get_radar_chart(input_data):
input_data = get_scaled_values(input_data)
categories = ['Radius', 'Texture', 'Perimeter', 'Area',
'Smoothness', 'Compactness',
'Concavity', 'Concave Points',
'Symmetry', 'Fractal Dimension']
fig = go.Figure()
fig.add_trace(go.Scatterpolar(
r=[
input_data['radius_mean'], input_data['texture_mean'], input_data['perimeter_mean'],
input_data['area_mean'], input_data['smoothness_mean'], input_data['compactness_mean'],
input_data['concavity_mean'], input_data['concave points_mean'], input_data['symmetry_mean'],
input_data['fractal_dimension_mean']
],
theta=categories,
fill='toself',
name='Mean Value'
))
fig.add_trace(go.Scatterpolar(
r=[
input_data['radius_se'], input_data['texture_se'], input_data['perimeter_se'], input_data['area_se'],
input_data['smoothness_se'], input_data['compactness_se'], input_data['concavity_se'],
input_data['concave points_se'], input_data['symmetry_se'],input_data['fractal_dimension_se']
],
theta=categories,
fill='toself',
name='Standard Error'
))
fig.add_trace(go.Scatterpolar(
r=[
input_data['radius_worst'], input_data['texture_worst'], input_data['perimeter_worst'],
input_data['area_worst'], input_data['smoothness_worst'], input_data['compactness_worst'],
input_data['concavity_worst'], input_data['concave points_worst'], input_data['symmetry_worst'],
input_data['fractal_dimension_worst']
],
theta=categories,
fill='toself',
name='Worst Value'
))
fig.update_layout(
polar=dict(
radialaxis=dict(
visible=True,
range=[0, 1]
)),
showlegend=True
)
return fig
def add_predictions(input_data):
model = pickle.load(open("../model/model.pkl", "rb"))
scaler = pickle.load(open("../model/scaler.pkl", "rb"))
input_array = np.array(list(input_data.values())).reshape(1, -1)
input_array_scaled = scaler.transform(input_array)
prediction = model.predict(input_array_scaled)
st.subheader("Cell cluster prediction")
st.write("The cell cluster is:")
if prediction[0] == 0:
st.write("<span class='diagnosis benign'>Benign</span>", unsafe_allow_html=True)
else:
        st.write("<span class='diagnosis malignant'>Malignant</span>", unsafe_allow_html=True)
    st.write("Probability of being benign: ", model.predict_proba(input_array_scaled)[0][0])
    st.write("Probability of being malignant: ", model.predict_proba(input_array_scaled)[0][1])
st.write("This app can assist medical professionals in making a diagnosis, but should not be used as a substitute for a professional diagnosis.")
return (model.predict_proba(input_array_scaled)[0][0], model.predict_proba(input_array_scaled)[0][1])
def assistant(B, M):
openai.api_key = "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"
prompt = f
I build an app with Wisconsin breast cancer diagnosis and used machine learning to give you these results, now I want you to be in the role of assistant within that app and generate general guidelines on what should he/she do when I give you the percentage
now generate guidelines for these predictions as you are talking to the patient:
Prediction Results:
Malignant Probability: {M}
Benign Probability: {B}
response = openai.Completion.create(
model="text-davinci-003",
prompt=prompt,
temperature=0.6,
max_tokens = 400
)
guidelines = response.choices[0].text.strip()
return(guidelines)
def main():
st.set_page_config(
page_title="Breast Cancer Predictor",
page_icon=":female-doctor:",
layout="wide",
initial_sidebar_state="expanded"
)
input_data = add_sidebar()
with st.container():
st.title("Breast Cancer Predictor")
        st.write("Please connect this app to your cytology lab to help diagnose breast cancer from your tissue sample. This app predicts using a machine learning model whether a breast mass is benign or malignant based on the measurements it receives from your cytology lab. You can also update the measurements by hand using the sliders in the sidebar. ")
col1, col2 = st.columns([4,1])
with col1:
radar_chart = get_radar_chart(input_data)
st.plotly_chart(radar_chart)
st.write("---")
with col2:
B , M = add_predictions(input_data)
st.header("Ask the AI")
st.write("Here you can ask the AI a question about the data")
    if st.button('Generate guidelines!'):
with col1:
st.write(assistant(B, M))
if __name__ == '__main__':
main() | [] |
2024-01-10 | Mr-Sure/langchain | langchain~agents~agent_toolkits~sql~toolkit.py | """Toolkit for interacting with a SQL database."""
from typing import List
from pydantic import Field
from langchain.agents.agent_toolkits.base import BaseToolkit
from langchain.llms.base import BaseLLM
from langchain.llms.openai import OpenAI
from langchain.sql_database import SQLDatabase
from langchain.tools import BaseTool
from langchain.tools.sql_database.tool import (
InfoSQLDatabaseTool,
ListSQLDatabaseTool,
QueryCheckerTool,
QuerySQLDataBaseTool,
)
class SQLDatabaseToolkit(BaseToolkit):
"""Toolkit for interacting with SQL databases."""
db: SQLDatabase = Field(exclude=True)
llm: BaseLLM = Field(default_factory=lambda: OpenAI(temperature=0))
@property
def dialect(self) -> str:
"""Return string representation of dialect to use."""
return self.db.dialect
class Config:
"""Configuration for this pydantic object."""
arbitrary_types_allowed = True
def get_tools(self) -> List[BaseTool]:
"""Get the tools in the toolkit."""
return [
QuerySQLDataBaseTool(db=self.db),
InfoSQLDatabaseTool(db=self.db),
ListSQLDatabaseTool(db=self.db),
QueryCheckerTool(db=self.db, llm=self.llm),
]
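if __name__ == "__main__":
    # Minimal usage sketch, assuming a local SQLite file (Chinook.db) and an
    # OPENAI_API_KEY in the environment; the toolkit is normally consumed by
    # create_sql_agent rather than called directly.
    from langchain.agents.agent_toolkits import create_sql_agent
    db = SQLDatabase.from_uri("sqlite:///Chinook.db")
    toolkit = SQLDatabaseToolkit(db=db, llm=OpenAI(temperature=0))
    agent_executor = create_sql_agent(llm=OpenAI(temperature=0), toolkit=toolkit, verbose=True)
    agent_executor.run("How many tables are there in the database?")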
| [] |
2024-01-10 | Mr-Sure/langchain | langchain~agents~agent_toolkits~openapi~planner.py | """Agent that interacts with OpenAPI APIs via a hierarchical planning approach."""
import json
import re
from typing import List, Optional
import yaml
from langchain.agents.agent import AgentExecutor
from langchain.agents.agent_toolkits.openapi.planner_prompt import (
API_CONTROLLER_PROMPT,
API_CONTROLLER_TOOL_DESCRIPTION,
API_CONTROLLER_TOOL_NAME,
API_ORCHESTRATOR_PROMPT,
API_PLANNER_PROMPT,
API_PLANNER_TOOL_DESCRIPTION,
API_PLANNER_TOOL_NAME,
PARSING_GET_PROMPT,
PARSING_POST_PROMPT,
REQUESTS_GET_TOOL_DESCRIPTION,
REQUESTS_POST_TOOL_DESCRIPTION,
)
from langchain.agents.agent_toolkits.openapi.spec import ReducedOpenAPISpec
from langchain.agents.mrkl.base import ZeroShotAgent
from langchain.agents.tools import Tool
from langchain.chains.llm import LLMChain
from langchain.llms.openai import OpenAI
from langchain.memory import ReadOnlySharedMemory
from langchain.prompts import PromptTemplate
from langchain.requests import RequestsWrapper
from langchain.schema import BaseLanguageModel
from langchain.tools.base import BaseTool
from langchain.tools.requests.tool import BaseRequestsTool
#
# Requests tools with LLM-instructed extraction of truncated responses.
#
# Of course, truncating so bluntly may lose a lot of valuable
# information in the response.
# However, the goal for now is to have only a single inference step.
MAX_RESPONSE_LENGTH = 5000
class RequestsGetToolWithParsing(BaseRequestsTool, BaseTool):
name = "requests_get"
description = REQUESTS_GET_TOOL_DESCRIPTION
response_length: Optional[int] = MAX_RESPONSE_LENGTH
llm_chain = LLMChain(
llm=OpenAI(),
prompt=PARSING_GET_PROMPT,
)
def _run(self, text: str) -> str:
try:
data = json.loads(text)
except json.JSONDecodeError as e:
raise e
data_params = data.get("params")
response = self.requests_wrapper.get(data["url"], params=data_params)
response = response[: self.response_length]
return self.llm_chain.predict(
response=response, instructions=data["output_instructions"]
).strip()
async def _arun(self, text: str) -> str:
raise NotImplementedError()
class RequestsPostToolWithParsing(BaseRequestsTool, BaseTool):
name = "requests_post"
description = REQUESTS_POST_TOOL_DESCRIPTION
response_length: Optional[int] = MAX_RESPONSE_LENGTH
llm_chain = LLMChain(
llm=OpenAI(),
prompt=PARSING_POST_PROMPT,
)
def _run(self, text: str) -> str:
try:
data = json.loads(text)
except json.JSONDecodeError as e:
raise e
response = self.requests_wrapper.post(data["url"], data["data"])
response = response[: self.response_length]
return self.llm_chain.predict(
response=response, instructions=data["output_instructions"]
).strip()
async def _arun(self, text: str) -> str:
raise NotImplementedError()
#
# Orchestrator, planner, controller.
#
def _create_api_planner_tool(
api_spec: ReducedOpenAPISpec, llm: BaseLanguageModel
) -> Tool:
endpoint_descriptions = [
f"{name} {description}" for name, description, _ in api_spec.endpoints
]
prompt = PromptTemplate(
template=API_PLANNER_PROMPT,
input_variables=["query"],
partial_variables={"endpoints": "- " + "- ".join(endpoint_descriptions)},
)
chain = LLMChain(llm=llm, prompt=prompt)
tool = Tool(
name=API_PLANNER_TOOL_NAME,
description=API_PLANNER_TOOL_DESCRIPTION,
func=chain.run,
)
return tool
def _create_api_controller_agent(
api_url: str,
api_docs: str,
requests_wrapper: RequestsWrapper,
llm: BaseLanguageModel,
) -> AgentExecutor:
tools: List[BaseTool] = [
RequestsGetToolWithParsing(requests_wrapper=requests_wrapper),
RequestsPostToolWithParsing(requests_wrapper=requests_wrapper),
]
prompt = PromptTemplate(
template=API_CONTROLLER_PROMPT,
input_variables=["input", "agent_scratchpad"],
partial_variables={
"api_url": api_url,
"api_docs": api_docs,
"tool_names": ", ".join([tool.name for tool in tools]),
"tool_descriptions": "\n".join(
[f"{tool.name}: {tool.description}" for tool in tools]
),
},
)
agent = ZeroShotAgent(
llm_chain=LLMChain(llm=llm, prompt=prompt),
allowed_tools=[tool.name for tool in tools],
)
return AgentExecutor.from_agent_and_tools(agent=agent, tools=tools, verbose=True)
def _create_api_controller_tool(
api_spec: ReducedOpenAPISpec,
requests_wrapper: RequestsWrapper,
llm: BaseLanguageModel,
) -> Tool:
"""Expose controller as a tool.
The tool is invoked with a plan from the planner, and dynamically
creates a controller agent with relevant documentation only to
constrain the context.
"""
base_url = api_spec.servers[0]["url"] # TODO: do better.
def _create_and_run_api_controller_agent(plan_str: str) -> str:
pattern = r"\b(GET|POST)\s+(/\S+)*"
matches = re.findall(pattern, plan_str)
endpoint_names = [
"{method} {route}".format(method=method, route=route.split("?")[0])
for method, route in matches
]
endpoint_docs_by_name = {name: docs for name, _, docs in api_spec.endpoints}
docs_str = ""
for endpoint_name in endpoint_names:
docs = endpoint_docs_by_name.get(endpoint_name)
if not docs:
raise ValueError(f"{endpoint_name} endpoint does not exist.")
docs_str += f"== Docs for {endpoint_name} == \n{yaml.dump(docs)}\n"
agent = _create_api_controller_agent(base_url, docs_str, requests_wrapper, llm)
return agent.run(plan_str)
return Tool(
name=API_CONTROLLER_TOOL_NAME,
func=_create_and_run_api_controller_agent,
description=API_CONTROLLER_TOOL_DESCRIPTION,
)
def create_openapi_agent(
api_spec: ReducedOpenAPISpec,
requests_wrapper: RequestsWrapper,
llm: BaseLanguageModel,
shared_memory: Optional[ReadOnlySharedMemory] = None,
) -> AgentExecutor:
"""Instantiate API planner and controller for a given spec.
Inject credentials via requests_wrapper.
We use a top-level "orchestrator" agent to invoke the planner and controller,
rather than a top-level planner
that invokes a controller with its plan. This is to keep the planner simple.
"""
tools = [
_create_api_planner_tool(api_spec, llm),
_create_api_controller_tool(api_spec, requests_wrapper, llm),
]
prompt = PromptTemplate(
template=API_ORCHESTRATOR_PROMPT,
input_variables=["input", "agent_scratchpad"],
partial_variables={
"tool_names": ", ".join([tool.name for tool in tools]),
"tool_descriptions": "\n".join(
[f"{tool.name}: {tool.description}" for tool in tools]
),
},
)
agent = ZeroShotAgent(
llm_chain=LLMChain(llm=llm, prompt=prompt, memory=shared_memory),
allowed_tools=[tool.name for tool in tools],
)
return AgentExecutor.from_agent_and_tools(agent=agent, tools=tools, verbose=True)
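if __name__ == "__main__":
    # Minimal usage sketch, assuming an OpenAPI spec saved as openapi.yaml and any
    # auth headers the target API needs; reduce_openapi_spec trims the raw spec
    # before it is handed to the planner/controller agents.
    from langchain.agents.agent_toolkits.openapi.spec import reduce_openapi_spec
    with open("openapi.yaml") as f:
        raw_spec = yaml.safe_load(f)
    reduced_spec = reduce_openapi_spec(raw_spec)
    wrapper = RequestsWrapper(headers={})
    openapi_agent = create_openapi_agent(reduced_spec, wrapper, OpenAI(temperature=0))
    openapi_agent.run("List one endpoint this API exposes and what it does.")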
| [
"tool_descriptions",
"\n",
"tool_names",
"agent_scratchpad",
"- ",
"input",
", ",
"endpoints"
] |
2024-01-10 | Mr-Sure/langchain | tests~unit_tests~test_python.py | """Test functionality of Python REPL."""
import sys
import pytest
from langchain.python import PythonREPL
from langchain.tools.python.tool import PythonAstREPLTool, PythonREPLTool
_SAMPLE_CODE = """
```
def multiply():
print(5*6)
multiply()
```
"""
_AST_SAMPLE_CODE = """
```
def multiply():
return(5*6)
multiply()
```
"""
def test_python_repl() -> None:
"""Test functionality when globals/locals are not provided."""
repl = PythonREPL()
# Run a simple initial command.
repl.run("foo = 1")
assert repl.locals is not None
assert repl.locals["foo"] == 1
# Now run a command that accesses `foo` to make sure it still has it.
repl.run("bar = foo * 2")
assert repl.locals is not None
assert repl.locals["bar"] == 2
def test_python_repl_no_previous_variables() -> None:
"""Test that it does not have access to variables created outside the scope."""
foo = 3 # noqa: F841
repl = PythonREPL()
output = repl.run("print(foo)")
assert output == "name 'foo' is not defined"
def test_python_repl_pass_in_locals() -> None:
"""Test functionality when passing in locals."""
_locals = {"foo": 4}
repl = PythonREPL(_locals=_locals)
repl.run("bar = foo * 2")
assert repl.locals is not None
assert repl.locals["bar"] == 8
def test_functionality() -> None:
"""Test correct functionality."""
chain = PythonREPL()
code = "print(1 + 1)"
output = chain.run(code)
assert output == "2\n"
def test_functionality_multiline() -> None:
"""Test correct functionality for ChatGPT multiline commands."""
chain = PythonREPL()
tool = PythonREPLTool(python_repl=chain)
output = tool.run(_SAMPLE_CODE)
assert output == "30\n"
def test_python_ast_repl_multiline() -> None:
"""Test correct functionality for ChatGPT multiline commands."""
if sys.version_info < (3, 9):
pytest.skip("Python 3.9+ is required for this test")
tool = PythonAstREPLTool()
output = tool.run(_AST_SAMPLE_CODE)
assert output == 30
def test_function() -> None:
"""Test correct functionality."""
chain = PythonREPL()
code = "def add(a, b): " " return a + b"
output = chain.run(code)
assert output == ""
code = "print(add(1, 2))"
output = chain.run(code)
assert output == "3\n"
| [] |
2024-01-10 | Mr-Sure/langchain | tests~integration_tests~vectorstores~test_opensearch.py | """Test OpenSearch functionality."""
import pytest
from langchain.docstore.document import Document
from langchain.vectorstores.opensearch_vector_search import (
PAINLESS_SCRIPTING_SEARCH,
SCRIPT_SCORING_SEARCH,
OpenSearchVectorSearch,
)
from tests.integration_tests.vectorstores.fake_embeddings import FakeEmbeddings
DEFAULT_OPENSEARCH_URL = "http://localhost:9200"
texts = ["foo", "bar", "baz"]
def test_opensearch() -> None:
"""Test end to end indexing and search using Approximate Search."""
docsearch = OpenSearchVectorSearch.from_texts(
texts, FakeEmbeddings(), opensearch_url=DEFAULT_OPENSEARCH_URL
)
output = docsearch.similarity_search("foo", k=1)
assert output == [Document(page_content="foo")]
def test_opensearch_with_custom_field_name() -> None:
"""Test indexing and search using custom vector field and text field name."""
docsearch = OpenSearchVectorSearch.from_texts(
texts,
FakeEmbeddings(),
opensearch_url=DEFAULT_OPENSEARCH_URL,
vector_field="my_vector",
text_field="custom_text",
)
output = docsearch.similarity_search(
"foo", k=1, vector_field="my_vector", text_field="custom_text"
)
assert output == [Document(page_content="foo")]
text_input = ["test", "add", "text", "method"]
OpenSearchVectorSearch.add_texts(
docsearch, text_input, vector_field="my_vector", text_field="custom_text"
)
output = docsearch.similarity_search(
"add", k=1, vector_field="my_vector", text_field="custom_text"
)
assert output == [Document(page_content="foo")]
def test_opensearch_with_metadatas() -> None:
"""Test end to end indexing and search with metadata."""
metadatas = [{"page": i} for i in range(len(texts))]
docsearch = OpenSearchVectorSearch.from_texts(
texts,
FakeEmbeddings(),
metadatas=metadatas,
opensearch_url=DEFAULT_OPENSEARCH_URL,
)
output = docsearch.similarity_search("foo", k=1)
assert output == [Document(page_content="foo", metadata={"page": 0})]
def test_add_text() -> None:
"""Test adding additional text elements to existing index."""
text_input = ["test", "add", "text", "method"]
metadatas = [{"page": i} for i in range(len(text_input))]
docsearch = OpenSearchVectorSearch.from_texts(
texts, FakeEmbeddings(), opensearch_url=DEFAULT_OPENSEARCH_URL
)
docids = OpenSearchVectorSearch.add_texts(docsearch, text_input, metadatas)
assert len(docids) == len(text_input)
def test_opensearch_script_scoring() -> None:
"""Test end to end indexing and search using Script Scoring Search."""
pre_filter_val = {"bool": {"filter": {"term": {"text": "bar"}}}}
docsearch = OpenSearchVectorSearch.from_texts(
texts,
FakeEmbeddings(),
opensearch_url=DEFAULT_OPENSEARCH_URL,
is_appx_search=False,
)
output = docsearch.similarity_search(
"foo", k=1, search_type=SCRIPT_SCORING_SEARCH, pre_filter=pre_filter_val
)
assert output == [Document(page_content="bar")]
def test_add_text_script_scoring() -> None:
"""Test adding additional text elements and validating using Script Scoring."""
text_input = ["test", "add", "text", "method"]
metadatas = [{"page": i} for i in range(len(text_input))]
docsearch = OpenSearchVectorSearch.from_texts(
text_input,
FakeEmbeddings(),
opensearch_url=DEFAULT_OPENSEARCH_URL,
is_appx_search=False,
)
OpenSearchVectorSearch.add_texts(docsearch, texts, metadatas)
output = docsearch.similarity_search(
"add", k=1, search_type=SCRIPT_SCORING_SEARCH, space_type="innerproduct"
)
assert output == [Document(page_content="test")]
def test_opensearch_painless_scripting() -> None:
"""Test end to end indexing and search using Painless Scripting Search."""
pre_filter_val = {"bool": {"filter": {"term": {"text": "baz"}}}}
docsearch = OpenSearchVectorSearch.from_texts(
texts,
FakeEmbeddings(),
opensearch_url=DEFAULT_OPENSEARCH_URL,
is_appx_search=False,
)
output = docsearch.similarity_search(
"foo", k=1, search_type=PAINLESS_SCRIPTING_SEARCH, pre_filter=pre_filter_val
)
assert output == [Document(page_content="baz")]
def test_add_text_painless_scripting() -> None:
"""Test adding additional text elements and validating using Painless Scripting."""
text_input = ["test", "add", "text", "method"]
metadatas = [{"page": i} for i in range(len(text_input))]
docsearch = OpenSearchVectorSearch.from_texts(
text_input,
FakeEmbeddings(),
opensearch_url=DEFAULT_OPENSEARCH_URL,
is_appx_search=False,
)
OpenSearchVectorSearch.add_texts(docsearch, texts, metadatas)
output = docsearch.similarity_search(
"add", k=1, search_type=PAINLESS_SCRIPTING_SEARCH, space_type="cosineSimilarity"
)
assert output == [Document(page_content="test")]
def test_opensearch_invalid_search_type() -> None:
"""Test to validate similarity_search by providing invalid search_type."""
docsearch = OpenSearchVectorSearch.from_texts(
texts, FakeEmbeddings(), opensearch_url=DEFAULT_OPENSEARCH_URL
)
with pytest.raises(ValueError):
docsearch.similarity_search("foo", k=1, search_type="invalid_search_type")
def test_opensearch_embedding_size_zero() -> None:
"""Test to validate indexing when embedding size is zero."""
with pytest.raises(RuntimeError):
OpenSearchVectorSearch.from_texts(
[], FakeEmbeddings(), opensearch_url=DEFAULT_OPENSEARCH_URL
)
| [] |
2024-01-10 | Mr-Sure/langchain | langchain~agents~agent_toolkits~openapi~planner_prompt.py | # flake8: noqa
from langchain.prompts.prompt import PromptTemplate
API_PLANNER_PROMPT = """You are a planner that plans a sequence of API calls to assist with user queries against an API.
You should:
1) evaluate whether the user query can be solved by the API documentated below. If no, say why.
2) if yes, generate a plan of API calls and say what they are doing step by step.
You should only use API endpoints documented below ("Endpoints you can use:").
Some user queries can be resolved in a single API call, but some will require several API calls.
The plan will be passed to an API controller that can format it into web requests and return the responses.
----
Here are some examples:
Fake endpoints for examples:
GET /user to get information about the current user
GET /products/search search across products
POST /users/{{id}}/cart to add products to a user's cart
User query: tell me a joke
Plan: Sorry, this API's domain is shopping, not comedy.
User query: I want to buy a couch
Plan: 1. GET /products/search to search for couches
2. GET /user to find the user's id
3. POST /users/{{id}}/cart to add a couch to the user's cart
----
Here are endpoints you can use. Do not reference any of the endpoints above.
{endpoints}
----
User query: {query}
Plan:"""
API_PLANNER_TOOL_NAME = "api_planner"
API_PLANNER_TOOL_DESCRIPTION = f"Can be used to generate the right API calls to assist with a user query, like {API_PLANNER_TOOL_NAME}(query). Should always be called before trying to call the API controller."
# Execution.
API_CONTROLLER_PROMPT = """You are an agent that gets a sequence of API calls and given their documentation, should execute them and return the final response.
If you cannot complete them and run into issues, you should explain the issue. If you're able to resolve an API call, you can retry the API call. When interacting with API objects, you should extract ids for inputs to other API calls but ids and names for outputs returned to the User.
Here is documentation on the API:
Base url: {api_url}
Endpoints:
{api_docs}
Here are tools to execute requests against the API: {tool_descriptions}
Starting below, you should follow this format:
Plan: the plan of API calls to execute
Thought: you should always think about what to do
Action: the action to take, should be one of the tools [{tool_names}]
Action Input: the input to the action
Observation: the output of the action
... (this Thought/Action/Action Input/Observation can repeat N times)
Thought: I am finished executing the plan (or, I cannot finish executing the plan without knowing some other information.)
Final Answer: the final output from executing the plan or missing information I'd need to re-plan correctly.
Begin!
Plan: {input}
Thought:
{agent_scratchpad}
"""
API_CONTROLLER_TOOL_NAME = "api_controller"
API_CONTROLLER_TOOL_DESCRIPTION = f"Can be used to execute a plan of API calls, like {API_CONTROLLER_TOOL_NAME}(plan)."
# Orchestrate planning + execution.
# The goal is to have an agent at the top-level (e.g. so it can recover from errors and re-plan) while
# keeping planning (and specifically the planning prompt) simple.
API_ORCHESTRATOR_PROMPT = """You are an agent that assists with user queries against API, things like querying information or creating resources.
Some user queries can be resolved in a single API call, particularly if you can find appropriate params from the OpenAPI spec; though some require several API call.
You should always plan your API calls first, and then execute the plan second.
You should never return information without executing the api_controller tool.
Here are the tools to plan and execute API requests: {tool_descriptions}
Starting below, you should follow this format:
User query: the query a User wants help with related to the API
Thought: you should always think about what to do
Action: the action to take, should be one of the tools [{tool_names}]
Action Input: the input to the action
Observation: the result of the action
... (this Thought/Action/Action Input/Observation can repeat N times)
Thought: I am finished executing a plan and have the information the user asked for or the data the user asked to create
Final Answer: the final output from executing the plan
Example:
User query: can you add some trendy stuff to my shopping cart.
Thought: I should plan API calls first.
Action: api_planner
Action Input: I need to find the right API calls to add trendy items to the users shopping cart
Observation: 1) GET /items with params 'trending' is 'True' to get trending item ids
2) GET /user to get user
3) POST /cart to post the trending items to the user's cart
Thought: I'm ready to execute the API calls.
Action: api_controller
Action Input: 1) GET /items params 'trending' is 'True' to get trending item ids
2) GET /user to get user
3) POST /cart to post the trending items to the user's cart
...
Begin!
User query: {input}
Thought: I should generate a plan to help with this query and then copy that plan exactly to the controller.
{agent_scratchpad}"""
REQUESTS_GET_TOOL_DESCRIPTION = """Use this to GET content from a website.
Input to the tool should be a json string with 3 keys: "url", "params" and "output_instructions".
The value of "url" should be a string.
The value of "params" should be a dict of the needed and available parameters from the OpenAPI spec related to the endpoint.
If parameters are not needed, or not available, leave it empty.
The value of "output_instructions" should be instructions on what information to extract from the response,
for example the id(s) for a resource(s) that the GET request fetches.
"""
PARSING_GET_PROMPT = PromptTemplate(
template="""Here is an API response:\n\n{response}\n\n====
Your task is to extract some information according to these instructions: {instructions}
When working with API objects, you should usually use ids over names.
If the response indicates an error, you should instead output a summary of the error.
Output:""",
input_variables=["response", "instructions"],
)
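# Illustrative (assumed) input for the requests_get tool described by
# REQUESTS_GET_TOOL_DESCRIPTION above; it is not part of any prompt:
# {"url": "https://example.com/api/items",
#  "params": {"trending": "True"},
#  "output_instructions": "Extract the ids of the returned items"}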
REQUESTS_POST_TOOL_DESCRIPTION = """Use this when you want to POST to a website.
Input to the tool should be a json string with 3 keys: "url", "data", and "output_instructions".
The value of "url" should be a string.
The value of "data" should be a dictionary of key-value pairs you want to POST to the url.
The value of "output_instructions" should be instructions on what information to extract from the response, for example the id(s) for a resource(s) that the POST request creates.
Always use double quotes for strings in the json string."""
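# Illustrative (assumed) input for the requests_post tool described above; it is not
# part of any prompt:
# {"url": "https://example.com/api/cart",
#  "data": {"item_id": "123"},
#  "output_instructions": "Return the id of the created cart entry"}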
PARSING_POST_PROMPT = PromptTemplate(
template="""Here is an API response:\n\n{response}\n\n====
Your task is to extract some information according to these instructions: {instructions}
When working with API objects, you should usually use ids over names. Do not return any ids or names that are not in the response.
If the response indicates an error, you should instead output a summary of the error.
Output:""",
input_variables=["response", "instructions"],
)
| [
"Here is an API response:\n\n{response}\n\n====\nYour task is to extract some information according to these instructions: {instructions}\nWhen working with API objects, you should usually use ids over names.\nIf the response indicates an error, you should instead output a summary of the error.\n\nOutput:",
"instructions",
"Here is an API response:\n\n{response}\n\n====\nYour task is to extract some information according to these instructions: {instructions}\nWhen working with API objects, you should usually use ids over names. Do not return any ids or names that are not in the response.\nIf the response indicates an error, you should instead output a summary of the error.\n\nOutput:",
"You are an agent that gets a sequence of API calls and given their documentation, should execute them and return the final response.\nIf you cannot complete them and run into issues, you should explain the issue. If you're able to resolve an API call, you can retry the API call. When interacting with API objects, you should extract ids for inputs to other API calls but ids and names for outputs returned to the User.\n\n\nHere is documentation on the API:\nBase url: {api_url}\nEndpoints:\n{api_docs}\n\n\nHere are tools to execute requests against the API: {tool_descriptions}\n\n\nStarting below, you should follow this format:\n\nPlan: the plan of API calls to execute\nThought: you should always think about what to do\nAction: the action to take, should be one of the tools [{tool_names}]\nAction Input: the input to the action\nObservation: the output of the action\n... (this Thought/Action/Action Input/Observation can repeat N times)\nThought: I am finished executing the plan (or, I cannot finish executing the plan without knowing some other information.)\nFinal Answer: the final output from executing the plan or missing information I'd need to re-plan correctly.\n\n\nBegin!\n\nPlan: {input}\nThought:\n{agent_scratchpad}\n",
"response",
"You are a planner that plans a sequence of API calls to assist with user queries against an API.\n\nYou should:\n1) evaluate whether the user query can be solved by the API documentated below. If no, say why.\n2) if yes, generate a plan of API calls and say what they are doing step by step.\n\nYou should only use API endpoints documented below (\"Endpoints you can use:\").\nSome user queries can be resolved in a single API call, but some will require several API calls.\nThe plan will be passed to an API controller that can format it into web requests and return the responses.\n\n----\n\nHere are some examples:\n\nFake endpoints for examples:\nGET /user to get information about the current user\nGET /products/search search across products\nPOST /users/{{id}}/cart to add products to a user's cart\n\nUser query: tell me a joke\nPlan: Sorry, this API's domain is shopping, not comedy.\n\nUsery query: I want to buy a couch\nPlan: 1. GET /products/search to search for couches\n2. GET /user to find the user's id\n3. POST /users/{{id}}/cart to add a couch to the user's cart\n\n----\n\nHere are endpoints you can use. Do not reference any of the endpoints above.\n\n{endpoints}\n\n----\n\nUser query: {query}\nPlan:",
"You are an agent that assists with user queries against API, things like querying information or creating resources.\nSome user queries can be resolved in a single API call, particularly if you can find appropriate params from the OpenAPI spec; though some require several API call.\nYou should always plan your API calls first, and then execute the plan second.\nYou should never return information without executing the api_controller tool.\n\n\nHere are the tools to plan and execute API requests: {tool_descriptions}\n\n\nStarting below, you should follow this format:\n\nUser query: the query a User wants help with related to the API\nThought: you should always think about what to do\nAction: the action to take, should be one of the tools [{tool_names}]\nAction Input: the input to the action\nObservation: the result of the action\n... (this Thought/Action/Action Input/Observation can repeat N times)\nThought: I am finished executing a plan and have the information the user asked for or the data the used asked to create\nFinal Answer: the final output from executing the plan\n\n\nExample:\nUser query: can you add some trendy stuff to my shopping cart.\nThought: I should plan API calls first.\nAction: api_planner\nAction Input: I need to find the right API calls to add trendy items to the users shopping cart\nObservation: 1) GET /items with params 'trending' is 'True' to get trending item ids\n2) GET /user to get user\n3) POST /cart to post the trending items to the user's cart\nThought: I'm ready to execute the API calls.\nAction: api_controller\nAction Input: 1) GET /items params 'trending' is 'True' to get trending item ids\n2) GET /user to get user\n3) POST /cart to post the trending items to the user's cart\n...\n\nBegin!\n\nUser query: {input}\nThought: I should generate a plan to help with this query and then copy that plan exactly to the controller.\n{agent_scratchpad}"
] |
2024-01-10 | AdieLaine/lnu-ai | src~lnu-ai.py | # System-related
import os
import sys
import tempfile
import time
# File and IO operations
import json
import jsonlines
import librosa
import soundfile as sf
# Data processing and manipulation
import numpy as np
import pandas as pd
import random
# Data visualization
import matplotlib.pyplot as plt
import plotly.graph_objects as go
from bokeh.plotting import figure
from scipy.signal import spectrogram
from streamlit_agraph import agraph, Node, Edge, Config
# Image processing
from PIL import Image
# Network requests
import requests
# Text-to-speech
from gtts import gTTS
from pydub import AudioSegment
# Type hints
from typing import Dict, List, Tuple, Optional
# Language model
import openai
# Web application framework
import streamlit as st
from streamlit_option_menu import option_menu
# Machine learning metrics and preprocessing
from sklearn.metrics.pairwise import cosine_similarity
from sklearn.preprocessing import normalize
from scipy.spatial.distance import cosine
# Environment variables
from dotenv import load_dotenv
CUSTOM_CSS = """
<style>
.big-font {
font-size: 20px;
font-weight: bold;
}
.red-text {
color: crimson;
}
.green-text {
color: #42f5ad;
}
.blue-text {
color: #4287f5;
}
.selected-word {
font-weight: bold;
color: #f542f7;
}
.yellow-text {
color: #FFD700;
}
.custom-option-menu select {
font-weight: bold;
color: #FF6347;
background-color: #FFF;
}
</style>
"""
# Streamlit configurations
st.set_page_config(
page_title="Lnu-AI - An Indigenous AI System",
page_icon="🪶",
layout="wide",
initial_sidebar_state="auto",
menu_items={
'Get Help': 'https://github.com/AdieLaine/lnu-ai',
'Report a bug': 'https://github.com/AdieLaine/lnu-ai/issues',
'About': """
# Lnu-AI
Welcome to Lnu-AI! This application is dedicated to helping people learn and appreciate the Mi'kmaq language, an indigenous language of Eastern Canada and the United States.
## About Mi'kmaq Language
The Mi'kmaq language is a rich, polysynthetic language with a deep historical and cultural significance. It is, however, at risk of being lost as the number of fluent speakers decreases.
## The Lnu-AI Project
Lnu-AI utilizes advanced AI technologies to provide a platform for learning, using, and preserving the Mi'kmaq language. It offers various features like chat functionality, storytelling, and deep linguistic analysis to facilitate language learning and appreciation.
## Your Contribution
As an open-source project, we welcome contributions that help improve Lnu-AI and further its mission to preserve the Mi'kmaq language. Please visit our [GitHub](https://github.com/AdieLaine/lnu-ai) page for more information.
Enjoy your journey with the Mi'kmaq language!
"""
}
)
@st.cache_data
def load_all_word_details(file):
"""
Load word details from a JSON file.
Parameters:
file (str): Path to the JSON file containing the word details.
Returns:
all_word_details (dict): Dictionary of word details loaded from the JSON file.
"""
full_path = os.path.join("data", file)
if os.path.isfile(full_path):
with open(full_path, 'r') as f:
all_word_details = json.load(f)
return all_word_details
else:
return None
file = 'all_word_details.json'
all_word_details: dict = load_all_word_details(file)
@st.cache_data
def load_trained_data(file_paths):
"""
Load trained data from multiple files. Handles both formats.
Parameters:
file_paths (list): List of paths to the trained data files.
Returns:
trained_data (list): List of trained data.
"""
trained_data: list = []
    # Prioritize files containing 'trained_data' so their entries are loaded first
    sorted_file_paths = sorted(file_paths, key=lambda path: "trained_data" not in path)
for file_path in sorted_file_paths:
full_path = os.path.join("data", file_path)
if os.path.isfile(full_path):
with open(full_path, 'r') as f:
trained_data.extend([json.loads(line) for line in f])
if all('prompt' in data and 'completion' in data for data in trained_data):
return trained_data
else:
return None
file_paths = ['trained_data.jsonl']
trained_data: list = load_trained_data(file_paths)
@st.cache_data
def load_word_details_embeddings(file):
"""
Load word embeddings from a JSON file.
Parameters:
file (str): Path to the JSON file containing the word embeddings.
Returns:
word_details_embeddings (dict): Dictionary of word embeddings loaded from the JSON file.
"""
full_path = os.path.join("data", file)
if os.path.isfile(full_path):
with open(full_path, 'r') as f:
try:
word_details_embeddings = json.load(f)
return word_details_embeddings
except json.JSONDecodeError:
# Handle JSON decoding error
return None
else:
# Handle file not found
return None
file = 'word_details_embeddings.json'
word_details_embeddings: dict = load_word_details_embeddings(file)
@st.cache_data
def load_trained_data_embeddings(file_path):
"""
Load trained data embeddings from a JSON file.
Parameters:
file_path (str): Path to the JSON file containing the trained data embeddings.
Returns:
trained_data_embeddings (dict): Dictionary of trained data embeddings loaded from the JSON file.
"""
full_path = os.path.join("data", file_path)
if os.path.isfile(full_path):
with open(full_path, 'r') as file:
trained_data_embeddings = json.load(file)
return trained_data_embeddings
else:
return None
file_path = 'trained_data_embeddings.json'
trained_data_embeddings: dict = load_trained_data_embeddings(file_path)
@st.cache_data
def load_theme_and_story(jsonl_file):
"""
Load all themes from a JSONL file.
Args:
jsonl_file (str): Path to the JSONL file containing the themes.
Returns:
themes (list): A list containing all themes, or None if an error occurred.
"""
try:
full_path = os.path.join("data", jsonl_file)
with jsonlines.open(full_path) as reader:
themes = list(reader) # Read all themes into a list
return themes
except (FileNotFoundError, jsonlines.jsonlines.InvalidLineError) as e:
st.error(f"Error in loading themes: {str(e)}")
return None
jsonl_file = "mikmaq_semantic.jsonl"
themes = load_theme_and_story(jsonl_file)
@st.cache_data
def load_word_data(file_path):
"""
Load Lnu-AI Dictionary Word Data from a JSON file.
Parameters:
file_path (str): Path to the JSON file containing the Lnu-AI Dictionary Word Data.
Returns:
word_data (dict): Dictionary of Lnu-AI Dictionary Word Data loaded from the JSON file.
"""
data_path = os.path.join("data", file_path)
try:
with open(data_path, 'r') as file:
word_data = json.load(file) # Load and return the data
return word_data
except FileNotFoundError: # Handle file not found error
st.error(f"File {file_path} not found.")
return None
except json.JSONDecodeError: # Handle JSON decode error
st.error(f"Error decoding JSON file {file_path}.")
return None
@st.cache_data
def load_embeddings(file_path):
"""
Load embeddings from a JSON file.
Parameters:
file_path (str): Path to the JSON file containing the embeddings.
Returns:
embeddings (np.array): Array of embeddings loaded from the JSON file.
"""
data_path = os.path.join("data", file_path)
try:
with open(data_path, 'r') as file:
embeddings = np.array(json.load(file)) # Load the embeddings
embeddings = embeddings.reshape(embeddings.shape[0], -1) # Reshape the embeddings
return embeddings
except FileNotFoundError: # Handle file not found error
st.error(f"File {file_path} not found.")
return None
except json.JSONDecodeError: # Handle JSON decode error
st.error(f"Error decoding JSON file {file_path}.")
return None
except ValueError: # Handle reshape error
st.error(f"Error reshaping embeddings from file {file_path}.")
return None
@st.cache_data
def find_most_similar_word(input_word, word_details_embeddings: dict):
"""
Find the most similar word to the input word based on the cosine similarity of their embeddings.
Parameters:
input_word (str): The input word.
word_details_embeddings (dict): Dictionary of word embeddings.
Returns:
(str): The most similar word to the input word, or the input word itself if no embedding is found for it.
"""
input_embedding = word_details_embeddings.get(input_word)
if input_embedding is not None:
similarities = {word: 1 - cosine(input_embedding, embedding)
for word, embedding in word_details_embeddings.items() if word != input_word}
most_similar_word = max(similarities, key=similarities.get)
return most_similar_word
return input_word # If we don't have an embedding for the input word, just return the input word itself
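# Illustrative usage sketch (not called by the app): `find_most_similar_word`
# returns the nearest *other* vocabulary word when the input word has a stored
# embedding, and echoes the input back unchanged when it does not. The tiny
# vectors and words below are hypothetical stand-ins for the real embeddings
# loaded above.
def _example_find_most_similar_word() -> None:
    sample_embeddings = {
        "word_a": [0.9, 0.1, 0.0],
        "word_b": [0.1, 0.9, 0.0],
        "word_c": [0.2, 0.8, 0.1],
    }
    print(find_most_similar_word("word_b", sample_embeddings))   # nearest neighbour ("word_c")
    print(find_most_similar_word("missing", sample_embeddings))  # no embedding -> "missing"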
@st.cache_data
def compute_cosine_similarity(vector1, vector2):
"""
Compute the cosine similarity between two vectors.
Parameters:
vector1, vector2 (list): Two vectors for which cosine similarity is to be computed.
Returns:
(float): Cosine similarity between the two vectors.
"""
return cosine_similarity([vector1], [vector2])[0][0]
@st.cache_data
def calculate_cosine_similarity(embeddings, word_index1, word_index2):
"""
Calculate the cosine similarity between two words based on their embeddings.
Parameters:
embeddings (np.array): Array of word embeddings.
word_index1, word_index2 (int): Indices of the words in the embeddings array.
Returns:
(float): Cosine similarity between the two words.
"""
try:
vector1 = embeddings[word_index1].reshape(1, -1) # Reshape the first vector
vector2 = embeddings[word_index2].reshape(1, -1) # Reshape the second vector
return cosine_similarity(vector1, vector2)[0][0] # Compute and return the cosine similarity
except IndexError: # Handle index out of range error
st.error("Word index out of range for embeddings.")
return None
@st.cache_data
def replace_unknown_words(user_message, word_details_embeddings: dict):
"""
Replace unknown words in a message with the most similar known words.
Parameters:
user_message (str): The user's message.
word_details_embeddings (dict): Dictionary of word embeddings.
Returns:
(str): The user's message with unknown words replaced with the most similar known words.
"""
words = user_message.split()
known_words = word_details_embeddings.keys()
new_words = [word if word in known_words else find_most_similar_word(word, word_details_embeddings) for word in words]
return ' '.join(new_words)
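# Illustrative usage sketch (not called by the app): normalising a user message
# against the known vocabulary before it is matched to trained prompts. The
# vocabulary and message below are hypothetical; note that with the current
# `find_most_similar_word` behaviour, words without a stored embedding pass
# through unchanged.
def _example_replace_unknown_words() -> None:
    vocabulary = {"word_a": [1.0, 0.0], "word_b": [0.0, 1.0]}
    print(replace_unknown_words("word_a hello", vocabulary))  # -> "word_a hello"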
@st.cache_data
def clean_reply(reply):
"""
Cleans the assistant's reply by removing trailing whitespaces and an extra period at the end, as well as unwanted "user" or "assistant" at the beginning.
Parameters:
reply (str): Reply from the assistant.
Returns:
str: Cleaned reply.
"""
# Check if the reply starts with 'user:' or 'assistant:', and remove it if it does
if reply.startswith('user:') or reply.startswith('assistant:'):
reply = reply.split(':', 1)[1]
    # Split the reply into paragraphs (blank-line separated blocks)
    lines = reply.split('\n\n')
    # Remove trailing whitespace from the last paragraph
    last_line = lines[-1].rstrip()
    # Drop the trailing period when the text ends with the stray "'." sequence
    if last_line.endswith("'."):
        last_line = last_line[:-1]
# Update the last line in the lines list
lines[-1] = last_line
# Join the lines back together
cleaned_reply = '\n'.join(lines)
return cleaned_reply.strip() # Added strip() to remove leading/trailing whitespace
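# Illustrative usage sketch (not called by the app): `clean_reply` strips a
# leaked role prefix and trailing whitespace from model output. The sample
# reply below is hypothetical.
def _example_clean_reply() -> None:
    raw = "assistant: Kwe'! Welcome.\n\nSee you soon.   "
    print(clean_reply(raw))  # prints "Kwe'! Welcome." and "See you soon." on separate lines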
@st.cache_resource(show_spinner=False)
def load_env_variables_and_data():
"""
Loads Lnu-AI Assistant environment variables and data.
Returns:
dict: A dictionary containing the loaded environment variables and data.
"""
api_keys, tts_settings, local_data_files, models = load_env_variables()
file_paths = ['trained_data.jsonl']
trained_data = load_trained_data(file_paths)
all_word_details = load_all_word_details(local_data_files.get("all_word_details"))
trained_data_embeddings = load_trained_data_embeddings(local_data_files.get("trained_data_embeddings"))
word_details_embeddings = load_word_details_embeddings(local_data_files.get("word_details_embeddings"))
return {
"api_keys": api_keys,
"tts_settings": tts_settings,
"local_data_files": local_data_files,
"models": models,
"completion_model": trained_data,
"all_word_details": all_word_details,
"trained_data": trained_data,
"trained_data_embeddings": trained_data_embeddings,
"word_details_embeddings": word_details_embeddings,
"CUSTOM_CSS": CUSTOM_CSS
}
@st.cache_resource(show_spinner=False)
def load_env_variables():
"""
Load all the environment variables required for the Lnu-AI Assistant.
Returns:
Tuple: A tuple containing the loaded environment variables.
"""
load_dotenv()
api_keys = {
"openai": os.getenv("OPENAI_API_KEY"),
"replicate": os.getenv("REPLICATE_API_TOKEN"),
}
tts_settings = {
"tts_audio": os.getenv('TTS_AUDIO'),
"eleven_labs": os.getenv('ELEVEN_LABS'),
"speechki_audio": os.getenv('SPEECHKI_AUDIO'),
"local_tts": os.getenv('LOCAL_TTS'),
}
local_data_files = {
"trained_data": (os.getenv("TRAINED_DATA") + '.jsonl') if os.getenv("TRAINED_DATA") else None,
"all_word_details": (os.getenv("ALL_WORD_DETAILS") + '.json') if os.getenv("ALL_WORD_DETAILS") else None,
"trained_data_embeddings": (os.getenv("TRAINED_DATA_EMBEDDINGS") + '.json') if os.getenv("TRAINED_DATA_EMBEDDINGS") else None,
"word_details_embeddings": (os.getenv("WORD_DETAILS_EMBEDDINGS") + '.json') if os.getenv("WORD_DETAILS_EMBEDDINGS") else None,
}
models = {
"chat_model": os.getenv("CHAT_MODEL_SELECTION", default="gpt-4-0613"),
"completion_model": os.getenv("COMPLETION_MODEL_SELECTION", default="text-davinci-003"),
"fine_tuned_model_dictionary": os.getenv("FINE_TUNED_MODEL_DICTIONARY"),
"fine_tuned_model_data": os.getenv("FINE_TUNED_MODEL_DATA"),
}
openai.api_key = api_keys["openai"]
return api_keys, tts_settings, local_data_files, models
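# A minimal sketch of the .env entries that `load_env_variables` reads. The
# values below are placeholders only; the variable names are the ones
# referenced in the loader above.
#
#   OPENAI_API_KEY=<your OpenAI key>
#   REPLICATE_API_TOKEN=<your Replicate token>
#   TTS_AUDIO=gtts
#   ELEVEN_LABS=no
#   SPEECHKI_AUDIO=no
#   LOCAL_TTS=no
#   TRAINED_DATA=trained_data                        # loader appends ".jsonl"
#   ALL_WORD_DETAILS=all_word_details                # loader appends ".json"
#   TRAINED_DATA_EMBEDDINGS=trained_data_embeddings  # loader appends ".json"
#   WORD_DETAILS_EMBEDDINGS=word_details_embeddings  # loader appends ".json"
#   CHAT_MODEL_SELECTION=gpt-4-0613
#   COMPLETION_MODEL_SELECTION=text-davinci-003
#   FINE_TUNED_MODEL_DICTIONARY=<optional fine-tuned model id>
#   FINE_TUNED_MODEL_DATA=<optional fine-tuned model id>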
def generate_audio(text, tts_service):
"""
Generate audio from text using a TTS service.
Parameters:
text (str): The input text.
tts_service (str): The TTS service to use (e.g., "gtts", "st.audio").
Returns:
str: Path to the generated audio file.
"""
tts_service = tts_service.lower() # convert to lower case for case-insensitive comparison
if tts_service == 'gtts':
tts = gTTS(text=text, lang='en') # Replace 'en' with the appropriate language if needed
# Save the speech audio into a temporary mp3 file
temp_mp3 = tempfile.NamedTemporaryFile(delete=False, suffix=".mp3")
tts.save(temp_mp3.name)
# Convert the temporary mp3 file to wav format
audio = AudioSegment.from_mp3(temp_mp3.name)
temp_wav = tempfile.NamedTemporaryFile(delete=False, suffix=".wav")
audio.export(temp_wav.name, format='wav')
# Return the path to the generated wav file
return temp_wav.name
elif tts_service == 'st.audio':
return text
else:
raise ValueError(f"Invalid Text-to-Speech service: {tts_service}")
def generate_openai_images(prompt, role="DALL-E", context="In the creative and vibrant style of Norval Morrisseau, using colorful Mi'kmaq themes"):
"""
    Generates an image using OpenAI's DALL-E model.
    Args:
        prompt (str): The main prompt for the image generation.
context (str, optional): Context to provide to the image generation.
Defaults to "In the creative and vibrant style of Norval Morrisseau, using colorful Mi'kmaq themes".
Returns:
str: URL of the generated image if successful, else None.
"""
try:
full_prompt = f"{context} {prompt}"
truncated_prompt = full_prompt[:300]
prompt_settings = {
"model": "image-alpha-001",
"prompt": truncated_prompt,
}
response_settings = {
"num_images": 1,
"size": "1024x1024",
"response_format": "url"
}
        openai_key = os.getenv("OPENAI_API_KEY")
        if not openai_key:
            st.info("Environment variable OPENAI_API_KEY is not set; cannot generate an image without a valid key.")
            return None
response = requests.post(
"https://api.openai.com/v1/images/generations",
headers={
"Content-Type": "application/json",
"Authorization": f"Bearer {openai_key}"
},
json={
**prompt_settings,
**response_settings
}
)
response.raise_for_status() # Raise an exception for any HTTP errors
image_url = response.json()["data"][0]["url"]
return image_url
except (requests.RequestException, ValueError) as e:
st.error(f"Error in generating images: {str(e)}")
return None
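# Illustrative usage sketch (not called by the app): requesting a single image
# for a hypothetical prompt and rendering the returned URL in Streamlit.
def _example_generate_openai_images() -> None:
    image_url = generate_openai_images("a canoe on a calm river at sunrise")
    if image_url:
        st.image(image_url, use_column_width=True)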
@st.cache_data(experimental_allow_widgets=True)
def display_sound_of_words(user_selected_word, all_word_details: dict):
"""
Process sound of words data for visualization.
Args:
user_selected_word (str): The selected word.
all_word_details (dict): Dictionary of word details.
Returns:
None.
"""
# If the word exists in the dictionary
if user_selected_word in all_word_details:
# Find wav files
dir_path = os.path.join('media', 'audio', user_selected_word[0].lower(), user_selected_word)
# If the directory exists
if os.path.isdir(dir_path):
wav_files = [f for f in os.listdir(dir_path) if f.endswith('.wav')]
# If there are no WAV files
if not wav_files:
st.error(f"No WAV files found in the directory: {dir_path}")
return
selected_wav = st.sidebar.selectbox('Select the WAV file', wav_files)
# If a wav file is selected
if selected_wav:
# Path to the selected wav file
wav_file_path = os.path.join(dir_path, selected_wav)
# Display the audio player in the sidebar
st.sidebar.audio(wav_file_path)
# Display selected visuals
selected_visuals = st.sidebar.multiselect('Select visuals to display',
options=['MFCC 3D Plot', 'Pitch Contour', 'Audio Waveform', 'Spectrogram'],
default=['MFCC 3D Plot', 'Pitch Contour'])
# Loading audio data, handling audio issues
try:
y, sr = librosa.load(wav_file_path)
except Exception as e:
st.error(f"Error loading the audio file: {wav_file_path}. Error: {e}")
return
# Check if the audio data is valid
if y.size == 0:
st.error(f"The audio file {wav_file_path} is empty or corrupted.")
return
if 'MFCC 3D Plot' in selected_visuals:
# Compute MFCC features
mfccs = librosa.feature.mfcc(y=y, sr=sr, n_mfcc=13)
# Create 3D plot for MFCC
fig_mfcc = go.Figure(data=go.Surface(z=mfccs, colorscale='Viridis', colorbar=dict(title='MFCC')))
fig_mfcc.update_layout(
title='MFCC (3D view)',
scene=dict(
xaxis_title='Time',
yaxis_title='MFCC Coefficients',
zaxis_title='MFCC Value'
)
)
# Display the plot
st.plotly_chart(fig_mfcc)
st.info("The 3D plot above is a representation of MFCC (Mel-frequency cepstral coefficients). It's a type of feature used in sound processing. The axes represent Time, MFCC Coefficients, and MFCC Value.")
if 'Pitch Contour' in selected_visuals:
# Compute the spectral centroid and pitch
pitches, magnitudes = librosa.piptrack(y=y, sr=sr)
# Prepare data for 3D plot
x = np.array([i for i in range(pitches.shape[1]) for _ in range(pitches.shape[0])])
y = np.array([i for _ in range(pitches.shape[1]) for i in range(pitches.shape[0])])
z = pitches.flatten()
pitch_mag = magnitudes.flatten()
# Create color and size based on pitch magnitude
color = pitch_mag / np.max(pitch_mag)
size = pitch_mag * 50 / np.max(pitch_mag)
# Create 3D plot for pitch contour
fig_pitch = go.Figure(data=[go.Scatter3d(x=x, y=y, z=z, mode='markers', marker=dict(size=size, color=color, colorscale='Viridis', opacity=0.8))])
fig_pitch.update_layout(scene=dict(xaxis_title='Time (s)', yaxis_title='Frequency Bin', zaxis_title='Pitch (Hz)'), title=f'Pitch Contour: {user_selected_word}')
st.plotly_chart(fig_pitch)
st.info("The 3D plot above is a representation of the pitch contour. It's a way of visualizing the variation in pitch with respect to time.")
if 'Audio Waveform' in selected_visuals:
# Display audio waveform
st.subheader("Audio Waveform")
waveform_fig = view_audio_waveform(wav_file_path)
st.pyplot(waveform_fig)
st.info("The Audio Waveform is a graphical representation of the amplitude of the sound wave against time.")
if 'Spectrogram' in selected_visuals:
# Display spectrogram
st.subheader("Spectrogram")
spectrogram_fig = generate_audio_spectrogram(wav_file_path)
st.pyplot(spectrogram_fig)
st.info("The Spectrogram represents how the frequencies of the audio signal are distributed with respect to time.")
else:
st.info(f"No directory found for the word {user_selected_word} at the expected location: {dir_path}")
# filter the words that begin with 'q'
q_words = [(word, all_word_details[word]['meanings']) for word in all_word_details.keys() if word[0].lower() == 'q']
# create a markdown string with words and their meanings as a bullet point list
# strip the square brackets and single quotes from meanings
q_words_info = '\n'.join([f'- <span style="color:red;">{word}</span>: <span style="color:white;">{str(meanings)[2:-2]}</span>' for word, meanings in q_words])
st.markdown(f"We have limited the words for to demonstrate the function. These words are available to use:\n{q_words_info}", unsafe_allow_html=True)
else:
st.error(f"The word {user_selected_word} not found in the database.")
def view_audio_waveform(input_audio):
"""
Generate a waveform visualization of the audio.
Parameters:
input_audio (str): Path to the input audio file.
Returns:
fig (matplotlib.figure.Figure): The generated waveform plot figure.
"""
waveform, sample_rate = sf.read(input_audio)
time = np.arange(0, len(waveform)) / sample_rate
fig = plt.figure(figsize=(10, 4))
plt.plot(time, waveform)
plt.xlabel('Time (s)')
plt.ylabel('Amplitude')
plt.title('Audio Waveform')
return fig
def generate_audio_spectrogram(input_audio):
"""
Generate a spectrogram visualization of the audio.
Parameters:
input_audio (str): Path to the input audio file.
Returns:
fig (matplotlib.figure.Figure): The generated spectrogram plot figure.
"""
waveform, sample_rate = sf.read(input_audio)
_, _, Sxx = spectrogram(waveform, fs=sample_rate)
fig = plt.figure(figsize=(10, 6))
plt.imshow(np.log10(Sxx + 1e-10), aspect='auto', cmap='inferno', origin='lower')
plt.xlabel('Time')
plt.ylabel('Frequency')
plt.title('Spectrogram')
return fig
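# Illustrative usage sketch (not called by the app): rendering the waveform and
# spectrogram figures for one recording. The path below is hypothetical; the
# real app builds it from media/audio/<first letter>/<word>/ as shown in
# `display_sound_of_words` above.
def _example_audio_figures() -> None:
    sample_wav = os.path.join("media", "audio", "q", "qalipu", "sample.wav")  # hypothetical file
    st.pyplot(view_audio_waveform(sample_wav))
    st.pyplot(generate_audio_spectrogram(sample_wav))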
def generate_audio_visualization(input_audio, audio_info):
"""
Generate a 3D visual representation of the audio.
Parameters:
input_audio (str): Path to the input audio file.
audio_info (dict): Dictionary containing audio information (file_name, sample_rate, duration).
Returns:
fig (plotly.graph_objects.Figure): The generated 3D audio visualization figure.
"""
waveform, sample_rate = sf.read(input_audio)
_, _, Sxx = spectrogram(waveform, fs=sample_rate)
fig = go.Figure(data=[go.Surface(z=np.log10(Sxx + 1e-10), colorscale='inferno')])
fig.update_layout(scene=dict(
xaxis_title='Time',
yaxis_title='Frequency',
zaxis_title='Intensity',
aspectratio=dict(x=1, y=1, z=0.3),
xaxis=dict(showgrid=False, showticklabels=False),
yaxis=dict(showgrid=False, showticklabels=False),
zaxis=dict(showgrid=False, showticklabels=False),
camera=dict(
eye=dict(x=1.8, y=1.8, z=0.8),
up=dict(x=0, y=0, z=1),
center=dict(x=0, y=0, z=0)
)
))
annotation_text = f"Audio File: {audio_info['file_name']}\n" \
f"Sample Rate: {audio_info['sample_rate']} Hz\n" \
f"Duration: {audio_info['duration']} seconds"
fig.update_layout(
annotations=[
go.layout.Annotation(
x=0.05,
y=0.95,
xref='paper',
yref='paper',
text=annotation_text,
showarrow=False,
font=dict(size=12)
)
]
)
return fig
@st.cache_data(show_spinner=True)
def generate_word_translations_visualization(embeddings, word_list, all_word_details, selected_word, num_nodes, edge_option, previous_node_ids):
"""
Generates a visualization of word translations based on vector embeddings.
Parameters:
embeddings (numpy.ndarray): Array of word embeddings.
word_list (list): List of words corresponding to the embeddings.
all_word_details (dict): Dictionary containing additional details for each word.
selected_word (str): The selected word for visualization.
num_nodes (int): The number of closest words to include in the visualization.
edge_option (list): List of edge options to include in the visualization.
previous_node_ids (set): Set of previously added node IDs.
Returns:
tuple: A tuple containing the nodes, edges, config, and updated previous_node_ids.
"""
# Fetch index of selected word
word_index = word_list.index(selected_word)
# Initialize nodes and edges
nodes = []
edges = []
# Create the main node for the selected word
main_word_node = Node(id=selected_word, label=selected_word, shape="box", font={"size": 20}, color="crimson")
# Add the main node to the nodes list if not already added
if main_word_node.id not in previous_node_ids:
nodes.append(main_word_node)
previous_node_ids.add(main_word_node.id)
# Calculate cosine similarities and get indices of closest words
closest_word_indices = np.argsort([calculate_cosine_similarity(embeddings, word_index, i) for i in range(len(word_list))])[::-1]
# Define node colors for different edge options
node_colors = {"translation": "cyan", "part_of_speech": "limegreen", "pronunciation": "red", "meanings": "gold"}
# Create nodes and edges for closest words
for i in range(1, num_nodes + 1):
closest_word = word_list[closest_word_indices[i]]
closest_word_node = Node(id=closest_word, label=closest_word, shape="box", font={"size": 12}, color="lavender")
# Add closest word node to the nodes list if not already added
if closest_word_node.id not in previous_node_ids:
nodes.append(closest_word_node)
previous_node_ids.add(closest_word_node.id)
# Create an edge between the selected word and the closest word
translation_edge = Edge(source=selected_word, target=closest_word)
edges.append(translation_edge)
# Create additional nodes and edges for selected edge options
for key in edge_option:
if key in all_word_details[closest_word]:
value = all_word_details[closest_word][key]
formatted_key = key.replace("_", " ").title()
if formatted_key == 'Part Of Speech':
formatted_key = 'Part of Speech'
key_node = Node(id=f"{closest_word}-{key}", label=f"{formatted_key}: {value}", shape="box", font={"size": 10}, color=node_colors[key])
# Add key node to the nodes list if not already added
if key_node.id not in previous_node_ids:
nodes.append(key_node)
previous_node_ids.add(key_node.id)
# Create an edge between the closest word and the key node
translation_edge = Edge(source=closest_word, target=f"{closest_word}-{key}")
edges.append(translation_edge)
# Create a configuration object for the graph
config = Config(width=1024, height=1024, directed=True, hierarchical=True, center_nodes=True)
return nodes, edges, config, previous_node_ids
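# Illustrative usage sketch (not called by the app): building and rendering the
# translation graph for one word. The word choice, node count and edge options
# are hypothetical; `embeddings`, `word_list` and `all_word_details` are assumed
# to be the structures loaded in `explore_words` below.
def _example_word_graph(embeddings, word_list, all_word_details) -> None:
    seen_ids = set()
    nodes, edges, config, seen_ids = generate_word_translations_visualization(
        embeddings, word_list, all_word_details,
        selected_word=word_list[0], num_nodes=10,
        edge_option=["translation", "meanings"], previous_node_ids=seen_ids,
    )
    agraph(nodes=nodes, edges=edges, config=config)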
def explore_words_multiselect(key, label, options, default, format_func):
"""
Creates a multiselect dropdown in the sidebar with session state.
Parameters:
key (str): Unique identifier for the session state variable.
label (str): Label to display above the dropdown menu.
options (list): List of options for the dropdown menu.
default (list): Default selection for the dropdown menu.
format_func (function): Function to format the display of the options.
"""
# Initialize session state with default values if the key doesn't exist
if key not in st.session_state:
st.session_state[key] = default
# Create a multiselect dropdown menu
selection = st.sidebar.multiselect(
label=label,
options=options,
default=st.session_state[key],
format_func=format_func,
help=":bulb: Select one or more options for the edge."
)
# If "All Nodes" is selected, include all edge options in the state
if "all" in selection:
selection = options
# Update the session state when the selection changes
if st.session_state[key] != selection:
st.session_state[key] = selection
@st.cache_data
def explore_word_details(selected_word, all_word_details):
"""
Displays details of a selected word in the sidebar. If the word does not exist, an error message is displayed.
Parameters:
selected_word (str): Word selected by the user.
all_word_details (dict): Dictionary of all word details.
"""
# If the selected word does not exist, display an error message and return
if selected_word not in all_word_details:
st.error(f"The word {selected_word} does not exist in the dictionary.")
return
# Get details for the selected word
word_detail = all_word_details[selected_word]
word = word_detail.get("word", "")
pronunciation = word_detail.get("pronunciation", "")
part_of_speech = word_detail.get("part_of_speech", "")
translation = word_detail.get("translation", "")
meanings = word_detail.get("meanings", ["N/A"])
example = word_detail.get("example", ["N/A"])
alternate_forms = word_detail.get("alternate_forms", ["N/A"])
# Only display details if they exist
if word:
st.markdown(f"<div style='color:CornflowerBlue;'>Word: </div>{word}", unsafe_allow_html=True)
if pronunciation:
st.markdown(f"<div style='color:CornflowerBlue;'>Pronunciation: </div>{pronunciation}", unsafe_allow_html=True)
if part_of_speech:
st.markdown(f"<div style='color:CornflowerBlue;'>Part of Speech: </div>{part_of_speech}", unsafe_allow_html=True)
if translation:
st.markdown(f"<div style='color:CornflowerBlue;'>Translation: </div>{translation}", unsafe_allow_html=True)
st.markdown("<div style='color:CornflowerBlue;'>Meanings:</div>", unsafe_allow_html=True)
    for meaning in meanings:
        st.markdown(f"- {meaning}", unsafe_allow_html=True)
st.markdown("<div style='color:CornflowerBlue;'>Example of word used in sentence:</div>", unsafe_allow_html=True)
for example_line in example:
example_parts = example_line.split('\n')
if len(example_parts) >= 3:
st.markdown(
f"{example_parts[0]} <br> {example_parts[1]} <br> <span style='color:white;'>{example_parts[2]}</span>",
unsafe_allow_html=True
)
def explore_words():
"""
Loads the word data and embeddings, creates an interactive search feature,
and displays visualizations based on user selection.
"""
# Set up custom CSS
st.markdown(CUSTOM_CSS, unsafe_allow_html=True)
# Define file path for "bnr2.png"
image_path = os.path.join(os.getcwd(), "images", "bnr2.png")
# Load and display the image in the sidebar
image = Image.open(image_path)
st.sidebar.image(image, use_column_width=True, clamp=True, channels="RGB", output_format="png")
# Instruction text
st.sidebar.markdown("""
<div style="text-align: center;">
<div class="big-font -text center-text" style="color: crimson; font-size: 24px;">Examine Words</div>
</div>
<ul>
<li>Type an English word into the 'Search for a word' field.</li>
<li>Explore other Mi'kmaq words with similar meaning.</li>
<li>Select a Method to Explore</li>
</ul>
""", unsafe_allow_html=True)
st.sidebar.markdown("---")
st.sidebar.info("The Vector Matrix of all Mi'kmaq Words\n\n- Explore New Word Connections\n- See and Hear the Essence of Mi'kmaq Orthography\n- Dive into the Frequency of Mi'kmaq Words\n- What Makes a Word a Word?")
# Display the image in the sidebar
search_word = st.sidebar.text_input("Type an English word", value="")
# Instruction text
st.markdown("""
<div style="display: flex; flex-direction: column; align-items: center; text-align: center;">
<div class="big-font">
<span style="font-weight:bold; font-size:24px; color:CornflowerBlue;">Examine</span>
<span style="color:white;">:</span>
<span style="font-size:24px; color:crimson;">Panuijgatg</span>
</div>
<ul style="text-align: left;">
<li>Each Mi'kmaq word carries a linguistic tapestry of cultural significance and historical depth.</li>
<li>Mi'kmaq words harmoniously blend sound, syntax, and semantics to convey profound concepts.</li>
<li>The intricacy of Mi'kmaq words reveals the linguistic ingenuity of the Mi'kmaq people.</li>
</ul>
</div>
""", unsafe_allow_html=True)
st.divider()
# Custom CSS for the image and title
st.markdown("""
<style>
.explore-words-title {
font-size: 24px;
text-align: center;
}
</style>
""", unsafe_allow_html=True)
# Define file paths
file_path = 'all_word_details.json'
embeddings_path = 'embedded_word_data.json'
# Load word data and embeddings
all_word_details = load_word_data(file_path)
embeddings = load_embeddings(embeddings_path)
# If loading fails, terminate the function
if all_word_details is None or embeddings is None:
return
# Generate word list and transform data to DataFrame
word_list = list(all_word_details.keys())
df = pd.DataFrame([(key, value["translation"], value["meanings"], value["example"]) for key, value in all_word_details.items()],
columns=["word", "translation", "meanings", "example"])
# Sort the DataFrame by 'word' column in ascending order
df = df.sort_values(by=['word'])
# Resetting the index
df = df.reset_index(drop=True)
similar_words = None
selected_word = ""
user_selected_word = ""
# Check if a search_word has been entered
if search_word:
search_word_lower = search_word.lower()
# Find the exact match in the "translation" field where the searched word is the first word
exact_match_translation = df[df['translation'].apply(lambda translation: translation.lower().split()[0] == search_word_lower)]['word'].values
if len(exact_match_translation) > 0:
selected_word = exact_match_translation[0]
else:
# Find the word in the "meanings" field where the searched word is present
            similar_words_meanings = df[df['meanings'].apply(lambda meanings: any(search_word_lower in meaning.lower().split() for meaning in meanings))]['word'].values
if len(similar_words_meanings) > 0:
selected_word = similar_words_meanings[0]
if not selected_word:
st.sidebar.write("No similar word found.")
if selected_word:
# Get index of the selected word in dataframe
selected_word_index = df.index[df['word'] == selected_word].tolist()[0]
# Get next 19 words after the selected word
next_words = df.iloc[selected_word_index+1 : selected_word_index+20]['word'].values
# Combine the selected word with next 19 words
combined_words = np.concatenate(([selected_word], next_words))
user_selected_word = st.sidebar.selectbox("Similar words:", combined_words)
visual_option = st.sidebar.selectbox("Select a method to explore", ("Word Visualization", "Sound of Words"))
# Check if 'Sound of Words' is not selected
if visual_option != "Sound of Words":
# Edge option dropdown, number of nodes slider
explore_words_multiselect(
key="edge_option",
label=":gear: Edge Option", # Adding an emoji to the label
options=["all", "translation", "part_of_speech", "pronunciation", "meanings"],
default="all",
format_func=lambda x: "All Nodes" if x == "all" else {
"translation": "Translation",
"part_of_speech": "Part of Speech",
"pronunciation": "Pronunciation",
"meanings": "Meanings",
}.get(x, "Unknown")
)
num_nodes = st.sidebar.slider("Number of nodes", 5, 200, 10)
# Check if 'All Nodes' is selected
if "all" in st.session_state["edge_option"]:
st.session_state["edge_option"] = ["translation", "part_of_speech", "pronunciation", "meanings"]
# Initialize set to keep track of added node IDs
previous_node_ids = set()
# Display word details in sidebar
if user_selected_word:
with st.sidebar:
explore_word_details(user_selected_word, all_word_details)
# Auto-render the first visual in the main area
if visual_option == "Word Visualization":
if user_selected_word:
# Call the generate_word_translations_visualization function
nodes, edges, config, previous_node_ids = generate_word_translations_visualization(embeddings, word_list, all_word_details, user_selected_word, num_nodes, st.session_state["edge_option"], previous_node_ids)
agraph(nodes=nodes, edges=edges, config=config)
elif visual_option == "Sound of Words":
# Call the display_sound_of_words function
display_sound_of_words(user_selected_word, all_word_details)
@st.cache_resource(show_spinner=False)
def enhance_with_gpt(prompt, final_reply, models):
"""
Enhances the reply with GPT model by sending the conversation for completion.
Args:
prompt (str): User's message.
final_reply (str): Assistant's reply.
models (dict): Dictionary of trained models.
Returns:
str: Enhanced reply.
"""
model_name = models.get("chat_model", "gpt-4-0613")
try:
gpt_messages = [
{"role": "system", "content": "You are Lnu-AI, an AI developed to promote and preserve the Mi'kmaq language and culture."},
{"role": "user", "content": prompt},
{"role": "assistant", "content": final_reply}
]
response = openai.ChatCompletion.create(
model=model_name,
messages=gpt_messages,
max_tokens=1200,
temperature=0.5,
top_p=1.0,
frequency_penalty=0.5,
presence_penalty=0.5,
)
# Extract reply from response
if 'choices' in response and len(response['choices']) > 0:
if 'message' in response['choices'][0] and 'content' in response['choices'][0]['message']:
gpt_reply = response['choices'][0]['message']['content']
gpt_reply = clean_reply(gpt_reply)
return gpt_reply
except Exception as e:
print(f"Error with {model_name}: {e}")
return final_reply
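# Illustrative usage sketch (not called by the app): passing a draft completion
# through the chat model for polishing. The question, draft reply and model
# mapping below are hypothetical.
def _example_enhance_with_gpt() -> None:
    draft = "Glooscap is a central figure in Mi'kmaq stories."
    polished = enhance_with_gpt("Who is Glooscap?", draft, {"chat_model": "gpt-4-0613"})
    print(polished)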
@st.cache_data(show_spinner=True)
def lnu_ai_chat(prompt: str, trained_data: list, models: dict, word_details_embeddings: dict) -> str:
"""
Generates a response for a given user prompt using trained data or a language model.
Ensures that unknown words in the user's message are replaced with the most similar known words.
Args:
prompt (str): The message inputted by the user.
trained_data (list): A list of dictionaries containing prompts and their corresponding completions from training data.
models (dict): A dictionary of trained models for the chat bot.
word_details_embeddings (dict): A dictionary of word embeddings for known words.
Returns:
str: The generated response from the chat bot.
"""
# Replace unknown words in the user's prompt
prompt = replace_unknown_words(prompt.lower(), word_details_embeddings)
# Check for matches in trained data
matched_prompt_data = [data for data in trained_data if prompt == data['prompt'].lower()]
if matched_prompt_data:
fine_tuned_replies = [data['completion'] for data in matched_prompt_data]
reply = '\n\n'.join(fine_tuned_replies)
reply = clean_reply(reply)
# Enhance the reply using GPT
reply = enhance_with_gpt(prompt, reply, models)
# Add the new reply to the trained_data
if reply not in fine_tuned_replies:
new_prompt_data = {'prompt': prompt, 'completion': reply}
trained_data.append(new_prompt_data)
return reply
# If no match found in trained data, generate completion with the trained model
    model_to_use = models.get("fine_tuned_model_data") or models.get("completion_model", "curie:ft-personal-2023-05-16-05-11-43")
response = openai.Completion.create(
model=model_to_use,
prompt=prompt,
max_tokens=1000,
temperature=0.4,
)
if 'choices' in response and len(response['choices']) > 0:
if 'text' in response['choices'][0]:
final_reply = response['choices'][0]['text']
final_reply = clean_reply(final_reply)
# Enhance the reply using GPT
final_reply = enhance_with_gpt(prompt, final_reply, models)
# Add this new prompt-reply pair to the trained_data
new_prompt_data = {'prompt': prompt, 'completion': final_reply}
trained_data.append(new_prompt_data)
return final_reply
# Default response if no response is generated
return "Sorry, I don't understand. Please try again."
def display_detail(label, value):
"""
Helper function to display a single detail of a word.
Args:
label (str): Label for the detail.
value (str): Value of the detail.
"""
if value is None or value == "N/A":
value = "Not available"
st.markdown(f"<div style='color:CornflowerBlue;'>{label}: </div>{value}", unsafe_allow_html=True)
@st.cache_data(show_spinner=False)
def display_word_details_chatbot(selected_word, all_word_detail: dict):
"""
Displays details of a selected word in the chatbot interface.
Parameters:
selected_word (str): Selected word.
all_word_detail (dict): Details of all words.
"""
if selected_word not in all_word_detail:
st.error(f"The word {selected_word} does not exist in the dictionary.")
return
word_detail = all_word_detail[selected_word]
word = word_detail.get("word", "N/A")
pronunciation = word_detail.get("pronunciation", "N/A")
part_of_speech = word_detail.get("part_of_speech", "N/A")
translation = word_detail.get("translation", "N/A")
meanings = word_detail.get("meanings", ["N/A"])
example = word_detail.get("example", ["N/A"])
alternate_forms = word_detail.get("alternate_forms", ["N/A"])
# Display the word details using the helper function
display_detail('Word', word)
display_detail('Pronunciation', pronunciation)
display_detail('Part of Speech', part_of_speech)
display_detail('Translation', translation)
# Display meanings
st.markdown("<div style='color:CornflowerBlue;'>Meanings:</div>", unsafe_allow_html=True)
    for meaning in meanings:
        st.markdown(f"- {meaning}", unsafe_allow_html=True)
st.sidebar.markdown("---")
def chatbot_application(models, trained_data: dict, local_data_files: dict, tts_settings: dict):
"""
Main chatbot application function. It loads previous session states or initializes new ones,
sets up the sidebar and the main chat area, and handles user input and chatbot responses.
Parameters:
models (dict): Dictionary of trained models.
trained_data (dict): Dictionary of trained data.
local_data_files (dict): Dictionary of paths to local data files.
tts_settings (dict): Text-to-speech settings.
"""
# Custom CSS for the image and title
st.markdown("""
<style>
.explore-words-title {
font-size: 24px;
text-align: center;
}
</style>
""", unsafe_allow_html=True)
# Define file paths
image_path = os.path.join(os.getcwd(), "images", "bnr4.png")
# Load and display images in the sidebar
image = Image.open(image_path)
st.sidebar.image(image, use_column_width=True, clamp=True, channels="RGB", output_format="png")
# Instruction text
st.sidebar.markdown("""
<div style="text-align: center;">
<div class="big-font -text center-text" style="color: crimson; font-size: 24px;">Lnu-AI Chat</div>
</div>
<ul>
<li>I am trained on all Mi'kmaq words.</li>
<li>Ask me about Mi'kmaq culture or history.</li>
<li>I will do my best to answer any question you have.</li>
</ul>
""", unsafe_allow_html=True)
st.sidebar.markdown("---")
st.sidebar.info("Ask me about:\n\n- Glooscap\n- The Mi'kmaq Creation Story\n- Mi'kmaq Linguistics\n- How to make a Canoe\n- Craft a Sweetgrass Basket\n- Mi'kmaq History")
# Sidebar: search field
search_word = st.sidebar.text_input("Search for a word", value="")
# Load previous session states or initialize new ones
st.session_state.setdefault('generated', [])
st.session_state.setdefault('past', [])
# Load the trained data
try:
trained_data_file = os.path.join("data", "trained_data.jsonl")
with open(trained_data_file, "r") as f:
trained_data = [json.loads(line) for line in f]
word_details_data = load_all_word_details(local_data_files["all_word_details"])
trained_data_embeddings = load_trained_data_embeddings(local_data_files["trained_data_embeddings"])
word_details_embeddings = load_word_details_embeddings(local_data_files["word_details_embeddings"])
except Exception as e:
st.error(f"Failed to load data: {e}")
return
    # Define file paths (the trained data is reloaded from its JSONL file)
    file_path = 'all_word_details.json'
    embeddings_path = 'embedded_word_data.json'
    trained_data_path = 'trained_data.jsonl'
    # Load word data, embeddings and the trained prompt/completion pairs
    all_word_details = load_word_data(file_path)
    embeddings = load_embeddings(embeddings_path)
    trained_data = load_trained_data([trained_data_path])
# If loading fails, terminate the function
if all_word_details is None or embeddings is None:
return
# Generate word list and transform data to DataFrame
word_list = list(all_word_details.keys())
df = pd.DataFrame([(key, value["translation"], value["meanings"], value["example"]) for key, value in all_word_details.items()],
columns=["word", "translation", "meanings", "example"])
# Sort the DataFrame by 'word' column in ascending order
df = df.sort_values(by=['word'])
# Resetting the index
df = df.reset_index(drop=True)
similar_words = None
selected_word = ""
user_selected_word = ""
# Check if a search_word has been entered
if search_word:
search_word_lower = search_word.lower()
# Find the exact match in the "translation" field where the searched word is the first word
exact_match_translation = df[df['translation'].apply(lambda translation: translation.lower().split()[0] == search_word_lower)]['word'].values
if len(exact_match_translation) > 0:
selected_word = exact_match_translation[0]
else:
# Find the word in the "meanings" field where the searched word is present
            similar_words_meanings = df[df['meanings'].apply(lambda meanings: any(search_word_lower in meaning.lower().split() for meaning in meanings))]['word'].values
if len(similar_words_meanings) > 0:
selected_word = similar_words_meanings[0]
if not selected_word:
st.sidebar.write("No similar word found.")
if selected_word:
# Get index of the selected word in dataframe
selected_word_index = df.index[df['word'] == selected_word].tolist()[0]
# Get next 19 words after the selected word
next_words = df.iloc[selected_word_index+1 : selected_word_index+20]['word'].values
# Combine the selected word with next 19 words
combined_words = np.concatenate(([selected_word], next_words))
user_selected_word = st.sidebar.selectbox("Similar words:", combined_words)
# Display word details in sidebar
if user_selected_word:
with st.sidebar:
display_word_details_chatbot(user_selected_word, all_word_details)
# Instruction text
st.markdown("""
<div style="display: flex; flex-direction: column; align-items: center; text-align: center;">
<div class="big-font">
<span style="font-weight:bold; font-size:24px; color:CornflowerBlue;">Talk Together</span>
<span style="color:white;">:</span>
<span style="font-size:24px; color:crimson;">Mawagnutmajig</span>
</div>
<ul style="text-align: left;">
<li>I’m Lnu-AI, your Mi'kmaq AI Assistant.</li>
<li>I am still learning and won’t always get it right, but our conversations will help me improve.</li>
<li>Our conversations serve as a means to preserve and transmit knowledge, values, beliefs, and history from one generation to the next.</li>
</ul>
</div>
""", unsafe_allow_html=True)
st.divider()
with st.container():
# Define color variables
question_color = "CornflowerBlue"
lnu_ai_color = "crimson"
background_color = "#262626"
# Chat form for user input
with st.form(key='chat_form', clear_on_submit=True):
user_message = st.text_area("Chat with Lnu-AI", value="", height=150, max_chars=500, key="chat_input_chat")
submit_button = st.form_submit_button("Send a message")
# Process user input and display chatbot response
if submit_button and user_message:
user_message = user_message.replace("\r\n", " ").replace("\n", " ").replace("\r", " ")
st.session_state['past'].append(user_message)
with st.spinner("Referring to my Mi'kmaq Corpus ..."):
chat_response = lnu_ai_chat(user_message, trained_data, models, all_word_details)
st.session_state['generated'].append(chat_response)
# Generate audio response and play it
tts_service = tts_settings.get('tts_audio', 'gtts')
audio_file = generate_audio(chat_response, tts_service)
audio_response = open(audio_file, "rb")
st.audio(audio_response.read(), format="audio/wav")
os.remove(audio_file)
# Display the chat history
for i in range(len(st.session_state['generated'])-1, -1, -1):
chat_response = st.session_state['generated'][i]
user_message = st.session_state['past'][i]
if chat_response: # Only display non-empty messages
st.markdown(
f'<div style="background-color: {background_color}; padding: 20px; border-radius: 20px;">'
f'<b style="color: {question_color};">Question:</b> {user_message}</div>',
unsafe_allow_html=True
)
st.markdown(
f'<div style="background-color: {background_color}; padding: 20px; border-radius: 20px;">'
f'<b style="color: {lnu_ai_color};">Lnu-AI:</b> {chat_response}</div>',
unsafe_allow_html=True
)
def display_mikmaq_resources() -> None:
"""
Display Mi'kmaq language resources in a Streamlit application.
"""
# Set up the sidebar
st.markdown(CUSTOM_CSS, unsafe_allow_html=True)
# Define file paths
image_path = os.path.join(os.getcwd(), "images", "bnr3.png")
image = Image.open(image_path)
st.sidebar.image(image, use_column_width="always", clamp=True, channels="RGB", output_format="png")
# Instruction text
st.sidebar.markdown("""
<div style="text-align: center;">
<div class="big-font -text center-text" style="color: crimson; font-size: 24px;">Language Preservation</div>
</div>
<ul></ul>
""", unsafe_allow_html=True)
st.sidebar.markdown("---")
page_selection = st.sidebar.radio("Go to", ["Home", "Helpful Links", "The Lnu-AI Project"])
st.sidebar.info("The Lnu-AI Project is open source. [Lnu-AI Repository](https://github.com/AdieLaine/lnu-ai)")
st.sidebar.info("Connect with the Developer on [Twitter](https://twitter.com/justmadielaine).")
if page_selection == "Home":
# Instruction text
st.markdown("""
<div style="display: flex; flex-direction: column; align-items: center; text-align: center;">
<div class="big-font">
<span style="font-weight:bold; font-size:24px; color:CornflowerBlue;">Preserve</span>
<span style="color:white;">:</span>
<span style="font-size:24px; color:crimson;">Gweso'tg</span>
</div>
<ul style="text-align: left;">
<li>Mi'kmaq language resources provide invaluable tools for learning, studying, and immersing oneself in the language.</li>
<li>Collaboration among linguists, educators, and community members strengthens the development and availability of Mi'kmaq language resources.</li>
<li>The commitment to Mi'kmaq language preservation and the availability of resources empower individuals to engage with and contribute to the revitalization of their language.</li>
</ul>
</div>
""", unsafe_allow_html=True)
st.divider()
elif page_selection == "The Lnu-AI Project":
image_path = os.path.join(os.getcwd(), "images", "LnuSideFlag.png")
# Load and display the image
image = Image.open(image_path)
st.image(image, use_column_width="always", clamp=True, channels="RGB", output_format="png")
st.markdown("""
## The :red[Lnu-AI] Project
The :red[Lnu-AI] Project is a groundbreaking initiative that creatively combines the power of Artificial Intelligence systems with a profound respect for cultural heritage to revolutionize the preservation and revitalization of the Mi'kmaq language. With its roots firmly anchored in the Mi'kmaq community, :red[Lnu-AI] leverages cutting-edge technologies, such as machine learning and natural language processing, to ensure the survival and advancement of this culturally significant language.
In an era where countless indigenous languages are on the brink of extinction, preserving them becomes an imperative mission. The Mi'kmaq language, echoing the voice of the Mi'kmaq people across Mi'kma'ki, is a living testament to a rich history and culture. The :red[Lnu-AI] Project endeavors to construct a dynamic platform that safeguards this linguistic legacy, fostering an environment where the Mi'kmaq language can flourish and be appreciated by both the Mi'kmaq community and the wider world.
At its core, :red[Lnu-AI] aims to offer meticulously accurate translations, definitions, and contextual applications of Mi'kmaq words and phrases. :red[Lnu-AI] uses sophisticated machine learning algorithms to train on an extensive dataset drawn from various Mi'kmaq language resources. This method guarantees :red[Lnu-AI]'s proficiency in comprehending the Mi'kmaq language, enabling it to generate specific and culturally relevant content. Whether you're an enthusiastic language learner, a cultural explorer, or intrigued by the Mi'kmaq language, :red[Lnu-AI] can provide accurate and detailed responses.
The scope of the :red[Lnu-AI] Project extends beyond language preservation alone. Recognizing the connection between language and culture, :red[Lnu-AI] goes beyond a typical translation service, serving as a comprehensive digital archive of Mi'kmaq culture, traditions, history, and customs. This rich repository fuels a deeper understanding and appreciation of the Mi'kmaq lifestyle, with :red[Lnu-AI] capable of enlightening users about traditional ceremonies, folklore, art, music, and much more. This fusion of past language meanings with present-day AI solutions, like Large Language Models, forms intricate and novel connections: a bridge that connects users to the essence of the Mi'kmaq community and an ancient Indigenous language.
A crucial component of the :red[Lnu-AI] project is our emphasis on cultural sensitivity and accuracy. :red[Lnu-AI] respects and accurately portrays the intricacies of the Mi'kmaq language and culture. :red[Lnu-AI] offers a technologically advanced, culturally respectful, and accurate platform, fostering a genuine understanding of the Mi'kmaq heritage.
The :red[Lnu-AI] Project seeks to preserve, promote, and celebrate the beauty of the Mi'kmaq language through storytelling and conversation, offering a deeper insight into this beautiful ancient language.
""")
elif page_selection == "Helpful Links":
st.subheader('Helpful Links')
st.markdown("""
- [Assembly of First Nations](http://www.afn.ca/)
Discover the Assembly of First Nations (AFN), Canada's premier national representative organization of over 630 First Nation communities.
- [Atlantic Canada's First Nation Help Desk](http://firstnationhelp.com/)
Visit Atlantic Canada's First Nation Help Desk, promoting digital literacy among students and teachers through accessible Internet content.
- [Listuguj Mi'gmaq Government](https://listuguj.ca/)
Explore the official portal of the Listuguj Mi'gmaq Government for comprehensive information on community, governance, and services.
- [Passamaquoddy-Maliseet Language Portal](http://pmportal.org/)
Connect with the rich linguistic heritage of Passamaquoddy-Maliseet speakers through the Language Portal's comprehensive dictionary and video archives.
- [Mi'gmawei Mawio'mi](http://www.migmawei.ca)
Learn about the Mi'gmawei Mawiomi Secretariat (Tribal Council), a collective representative body founded in 2000 for the Councils of Gespeg, Gesgapegiag, and Listuguj.
- [Mi'kmaq Resource Centre](https://www.cbu.ca/indigenous-affairs/unamaki-college/mikmaq-resource-centre/)
Engage with the rich repository of documents at the Mi'kmaq Resource Centre, a part of Unama'ki College at Cape Breton University, dedicated to Aboriginal research.
- [Mi’gmaq-Mi’kmaq Online](https://www.mikmaqonline.org/)
Access the Mi’gmaq-Mi’kmaq Online Dictionary, a comprehensive resource for the Mi’gmaq language, featuring a searchable database of words, phrases, and audio files.
- [Native Languages of the Americas: Mi'kmaq](http://www.native-languages.org/mikmaq.htm)
Experience the dedicated work of the non-profit organization, Native Languages of the Americas, in preserving and promoting indigenous languages including Mi'kmaq.
- [NativeTech: Native American Technology & Art](http://www.nativetech.org/)
Dive into the world of indigenous ethno-technology with NativeTech, focusing on the historical and contemporary arts of Eastern Woodland Indian Peoples.
- [`Nnui Tli'suti/`Lnuei Tli'suit](http://nnueitlisuti.webs.com/)
Access Watson Williams' detailed lesson plans on Mi'gmaw language, featuring reading and writing exercises in both the Listuguj and Smith-Francis writing systems.
""")
st.info("These links and more will be converted into an Vector Embedded database to preserve the content for future generations.")
def main_application(global_vars: dict) -> None:
"""
Main application function.
Args:
global_vars (dict): A dictionary containing all global variables.
"""
all_word_details = global_vars['all_word_details']
tts_settings = global_vars['tts_settings']
# Custom CSS for the image and title
st.markdown(CUSTOM_CSS, unsafe_allow_html=True)
# Define file path for "bnr1.png"
image_path = os.path.join(os.getcwd(), "images", "bnr1.png")
# Load and display the image
image = Image.open(image_path)
st.sidebar.image(image, use_column_width=True, clamp=True, channels="RGB", output_format="png")
# Instruction text
st.sidebar.markdown("""
<div style="text-align: center;">
<div class="big-font -text center-text" style="color: crimson; font-size: 24px;">Generate a Story</div>
</div>
<ul>
<li>Type an English word into the 'Search for a word' field.</li>
<li>Select a Mi'kmaq word from the 'Similar words' list.</li>
<li>Click Generate Story and an audio and visual story will be generated.</li>
</ul>
""", unsafe_allow_html=True)
st.sidebar.markdown("---")
search_word = st.sidebar.text_input("Search for a word", value="")
# Initialize sidebar
sidebar = st.sidebar
# Center the image using Streamlit's layout feature
col1, col2, col3 = st.columns([1, 2, 1])
with col2:
st.image(image, width=None, clamp=True, channels="RGB", output_format="PNG")
# Instruction text
st.markdown("""
<div style="display: flex; flex-direction: column; align-items: center; text-align: center;">
<div class="big-font">
<span style="font-weight:bold; font-size:24px; color:CornflowerBlue;">Storyteller</span>
<span style="color:white;">:</span>
<span style="font-size:24px; color:crimson;">A'tugwewinu</span>
</div>
<ul style="text-align: left;">
<li>Mi'kmaq storytelling plays a role in preserving the Mi'kmaq language.</li>
<li>Storytelling is a vital aspect of the Mi'kmaq people's culture and tradition.</li>
<li>It serves as a means to preserve and transmit knowledge, values, beliefs, and history from one generation to the next.</li>
</ul>
</div>
""", unsafe_allow_html=True)
st.divider()
# Initialize the variable selected_word
selected_word = None
# Load word data
if all_word_details is None:
st.sidebar.error("Failed to load word data.")
return
# Generate word list and transform data to DataFrame
word_list = list(all_word_details.keys())
df = pd.DataFrame([(key, value["translation"], value["meanings"], value["example"]) for key, value in all_word_details.items()],
columns=["word", "translation", "meanings", "example"])
# Sort the DataFrame by 'word' column in ascending order
df = df.sort_values(by=['word'])
# Resetting the index
df = df.reset_index(drop=True)
similar_words = None
selected_word = ""
user_selected_word = ""
# Check if a search_word has been entered
if search_word:
search_word_lower = search_word.lower()
# Find the exact match in the "translation" field where the searched word is the first word
exact_match_translation = df[df['translation'].apply(lambda translation: translation.lower().split()[0] == search_word_lower)]['word'].values
if len(exact_match_translation) > 0:
selected_word = exact_match_translation[0]
else:
# Find the word in the "meanings" field where the searched word is present
            similar_words_meanings = df[df['meanings'].apply(lambda meanings: any(search_word_lower in meaning.lower().split() for meaning in meanings))]['word'].values
if len(similar_words_meanings) > 0:
selected_word = similar_words_meanings[0]
if not selected_word:
st.sidebar.write("No similar word found.")
if selected_word:
# Get index of the selected word in dataframe
selected_word_index = df.index[df['word'] == selected_word].tolist()[0]
# Get next 19 words after the selected word
next_words = df.iloc[selected_word_index+1 : selected_word_index+20]['word'].values
# Combine the selected word with next 19 words
combined_words = np.concatenate(([selected_word], next_words))
user_selected_word = st.sidebar.selectbox("Similar words:", combined_words)
# Display word details in sidebar
if selected_word:
display_word_details_main(selected_word, all_word_details, tts_settings, sidebar)
# TTS service selection in the sidebar
tts_service = sidebar.selectbox("Select a TTS service", ['gtts'], key="tts_service_selectbox", index=0)
tts_settings["tts_audio"] = tts_service if tts_service else 'gtts'
# Display selected word below submit button
st.sidebar.markdown(f"Selected word: **{selected_word}**")
# Submit button in the sidebar
submit_button = sidebar.button(
f"Generate Story about **{selected_word}**",
help="Click to generate the story",
key="submit_button",
args=(selected_word, all_word_details, tts_service),
kwargs={"generate_images": selected_word == "Submit with Images"},
type="primary"
)
# Generate and display story, audio, and images
if submit_button:
st.info("Generating the story... This may take a minute. It's worth the wait!") # <--- Display info message
jsonl_file = "mikmaq_semantic.jsonl"
themes = load_theme_and_story(jsonl_file)
word_details = get_user_inputs(selected_word, all_word_details)
if word_details is not None:
meaning = word_details.get('meanings', [])[0] if word_details.get('meanings') else ""
theme, story_word, image_theme = get_theme_and_story_word(themes, selected_word, meaning)
story_text, _, _ = generate_story(word_details, theme, story_word, image_theme)
try:
audio = generate_audio(story_text, tts_service)
display_story_and_audio(story_text, audio)
except Exception as e:
st.error(f"An error occurred: {e}")
generate_and_display_images(story_text, image_theme)
info_placeholder.empty()  # <--- Removes info message once generation is finished
@st.cache_data
def get_tts_service(tts_settings: dict) -> str:
"""
Function to determine the appropriate Text-to-Speech service based on the settings.
Args:
tts_settings (dict): Text-to-Speech settings.
Returns:
str: Name of the Text-to-Speech service.
"""
tts_service = next((service for service, flag in tts_settings.items() if flag.lower() == 'yes'), 'gtts')
return tts_service
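# Illustrative usage (a hedged sketch added for clarity, not part of the original app):
# tts_settings is assumed to map service names to 'yes'/'no' flags; the first service
# flagged 'yes' wins and 'gtts' is the fallback. The 'polly' key below is hypothetical.
#   get_tts_service({'gtts': 'no', 'polly': 'yes'})  # -> 'polly'
#   get_tts_service({'gtts': 'no', 'polly': 'no'})   # -> 'gtts'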
def get_theme_and_story_word(themes: Optional[List[Dict[str, str]]], selected_word: str, meaning: str) -> Tuple[str, str, str]:
"""
Function to handle theme and story word selection.
"""
if not themes:
raise ValueError("No themes provided.")
selected_theme = select_random_theme(themes)
return replace_placeholders_in_theme(selected_theme, selected_word, meaning)
def select_random_theme(themes: List[Dict[str, str]]) -> Dict[str, str]:
"""
Selects a random theme from the provided list.
Args:
themes (List[Dict[str, str]]): The list of themes to choose from.
Returns:
Dict[str, str]: The selected theme.
"""
return random.choice(themes)
def replace_placeholders_in_theme(theme: Dict[str, str], word: str, meaning: str) -> Tuple[str, str, str]:
"""
Replaces placeholders in a theme with the provided word and meaning.
Args:
theme (Dict[str, str]): The theme.
word (str): The word to replace the '{word}' placeholder.
meaning (str): The meaning to replace the '{meaning}' placeholder.
Returns:
Tuple[str, str, str]: A tuple containing the theme, story word, and image theme.
Raises:
KeyError: If a required key is missing from the theme.
"""
try:
theme_text = theme['theme'].replace('{word}', word).replace('{meaning}', meaning)
story_word = theme['story_word']
image_theme = theme['image_theme'].replace('{word}', word).replace('{meaning}', meaning)
except KeyError as e:
raise KeyError(f"Required key missing from theme: {str(e)}")
return theme_text, story_word, image_theme
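# Illustrative example (hedged sketch; the theme dict below is hypothetical, real themes
# come from mikmaq_semantic.jsonl):
#   theme = {'theme': "A tale of {word} ({meaning})",
#            'story_word': "journey",
#            'image_theme': "watercolor scene of {word}"}
#   replace_placeholders_in_theme(theme, "A'tugwewinu", "storyteller")
#   # -> ("A tale of A'tugwewinu (storyteller)", "journey", "watercolor scene of A'tugwewinu")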
def generate_story(word_details: dict, theme: str, story_word: str, image_theme: str) -> Tuple[str, str, str]:
"""
Function to generate a story using OpenAI's GPT-4 model. Interacts with the OpenAI API to create a conversation
and uses the returned message content as the generated story.
Args:
word_details (dict): Details of the selected word (not currently used when building the prompt).
theme (str): The theme for the story.
story_word (str): The main word for the story.
image_theme (str): The theme for the image.
Returns:
str: The generated story text.
str: The main word for the story.
str: The theme for the image.
"""
# The model used for generating the story is retrieved from environment variables.
# If there is no such variable defined, use "gpt-4-0613" as the default model.
# You may want to replace this with a different model name if a new version is available.
model = os.getenv("CHAT_MODEL_SELECTION", "gpt-4-0613")
if model == "gpt-4-0613":
st.info("Environment variable for MODEL_SELECTION is not set, using default model: gpt-4-0613")
# Define the system's role and content. The system plays the role of a Mi'kmaq storyteller.
prompt_system = {
'role': 'system',
'content': "You are a Mi'kmaq storyteller, or an 'a'tugwewinu', skilled in weaving captivating tales about the Mi'kmaq people and their rich cultural heritage."
}
# Define the initial part of the story as an 'Assistant' message.
initial_story = {
'role': 'assistant',
'content': f"Let us embark on a journey with a theme of '{theme}', centered around the word '{story_word}'."
}
# Create a conversation with OpenAI's Chat models.
# Parameters like max_tokens, temperature, top_p, frequency_penalty, and presence_penalty can be tweaked
# to adjust the output from the model.
response = openai.ChatCompletion.create(
model="gpt-4-0613",
messages=[prompt_system, initial_story],
max_tokens=2000, # Maximum length of the generated text. Consider adjusting this for longer/shorter outputs.
temperature=0.5, # Controls the randomness of the output. Higher values (closer to 1) make output more random.
top_p=1.0, # Controls the nucleus sampling. Lower values can make the output more focused.
frequency_penalty=0.5, # Controls penalizing new tokens based on their frequency.
presence_penalty=0.5, # Controls penalizing new tokens based on their presence.
)
# This is the generated story text.
story_text = response['choices'][0]['message']['content']
# Return the generated story, story word, and image theme.
return story_text, story_word, image_theme
def process_story_generation(selected_word: str, all_word_details: dict, generate_images: bool, tts_settings: dict) -> None:
"""
Function to generate and display a story, its audio, and images.
Args:
selected_word (str): The selected word for the story.
all_word_details (dict): Dictionary of all word details.
generate_images (bool): Flag to decide if images need to be generated.
tts_settings (dict): Text-to-Speech settings.
"""
if not selected_word:
st.info("Please enter a word for the story")
return
with st.spinner("Generating story..."):
try:
word_detail = all_word_details[selected_word]
meaning = (word_detail.get('meanings') or [""])[0]  # first meaning, empty string if none exist
themes = load_theme_and_story("mikmaq_semantic.jsonl")
theme, story_word, image_theme = get_theme_and_story_word(themes, selected_word, meaning)
story_text, _, _ = generate_story(word_detail, theme, story_word, image_theme)
tts_service = get_tts_service(tts_settings)
audio = generate_audio(story_text, tts_service)
display_story_and_audio(story_text, audio)
st.success("Story generation completed!")
except Exception as e:
st.error(f"Error in generating story or audio: {str(e)}")
if generate_images:
generate_and_display_images(story_text, image_theme)
def display_story_and_audio(story_text: str, audio: Optional[str]) -> None:
"""
Function to display the story text and its audio.
Args:
story_text (str): The generated story text.
audio (Optional[str]): Path to the audio file.
"""
story_container = st.container()
with story_container:
st.markdown(
f"<div style='background-color: #2f4f4f; padding: 20px; border-radius: 10px; box-shadow: 0 2px 5px rgba(0, 0, 0, 0.1);'>"
f"<p style='font-size: 18px; font-weight: bold; margin-bottom: 10px;'>Story text:</p>"
f"<p style='font-size: 16px; line-height: 1.5;'>{story_text}</p>"
"</div>",
unsafe_allow_html=True
)
if audio is not None and os.path.isfile(audio):
with open(audio, 'rb') as audio_file:
audio_bytes = audio_file.read()
st.audio(audio_bytes, format='audio/wav')
os.remove(audio) # Delete the temporary audio file after playing
def generate_and_display_images(story_text: str, image_theme: str) -> None:
"""
Function to generate and display images for different parts of the story.
Args:
story_text (str): The generated story text.
image_theme (str): The theme for the image.
"""
story_parts = [
story_text[:len(story_text) // 3],
story_text[len(story_text) // 3: 2 * len(story_text) // 3],
story_text[2 * len(story_text) // 3:],
]
image_container = st.container()
for i, part in enumerate(story_parts):
image_url = generate_openai_images(part + ' ' + image_theme)
if image_url:
image_container.image(image_url, width=None, clamp=True, channels="RGB", output_format="png") # image_container used directly
else:
image_container.markdown("**Image generation failed.**") # image_container used directly
def get_user_inputs(selected_word: str, all_word_details: dict) -> Optional[Dict]:
"""
Function to validate and return word details based on user inputs.
Args:
selected_word (str): The word selected by the user.
all_word_details (dict): Dictionary of all word details.
Returns:
Optional[Dict]: Details of the selected word, None if not present.
"""
# Check if selected_word is in all_word_details
if selected_word not in all_word_details:
st.error(f"The word {selected_word} does not exist in the dictionary.")
return None
return all_word_details[selected_word]
def display_word_details_main(selected_word: str, all_word_details: dict, tts_settings: dict, sidebar) -> None:
"""
Function to display the details of a selected word.
Args:
selected_word (str): The word selected by the user.
all_word_details (dict): Dictionary of all word details.
tts_settings (dict): Text-to-Speech settings.
sidebar (Streamlit Sidebar): The sidebar object for output.
"""
word_detail = get_user_inputs(selected_word, all_word_details)
if word_detail is None:
return
# Display word details
sidebar.markdown(f"<h3 style='color: crimson;'><span style='font-size: 28px; font-weight: bold;'>{selected_word}</span></h3>", unsafe_allow_html=True) # changed to sidebar
sidebar.markdown(f"<div style='color:CornflowerBlue'>Pronunciation guide: {word_detail.get('pronunciation', '')}</div>", unsafe_allow_html=True) # changed to sidebar
sidebar.markdown(f"Part of speech: {word_detail.get('part_of_speech', '')}") # changed to sidebar
# Display meanings
meanings = word_detail.get('meanings', [])
sidebar.markdown("Meanings:") # changed to sidebar
for meaning in meanings:
sidebar.markdown(f"- {meaning}") # changed to sidebar
# Display example sentences
example_sentences = word_detail.get('example', [])
sidebar.markdown("Example of word used in sentence:") # changed to sidebar
for example_sentence in example_sentences:
sidebar.markdown(f"- {example_sentence}") # changed to sidebar
# Display pronunciation
sidebar.markdown("Listen to pronunciation:") # changed to sidebar
tts_service = tts_settings.get('tts_audio', 'gtts')
audio = generate_audio(selected_word, tts_service)
if os.path.isfile(audio):
with open(audio, 'rb') as audio_file:
audio_bytes = audio_file.read()
sidebar.audio(audio_bytes, format='audio/wav') # changed to sidebar
os.remove(audio) # Delete the temporary audio file after playing
def main():
"""
The main function of the application.
"""
global_vars = load_env_variables_and_data()
# You can use the global_vars dictionary to access the required values.
api_keys = global_vars['api_keys']
tts_settings = global_vars['tts_settings']
local_data_files = global_vars['local_data_files']
models = global_vars['models']
completion_model = global_vars['completion_model']
all_word_details = global_vars['all_word_details']
trained_data = global_vars['trained_data']
trained_data_embeddings = global_vars['trained_data_embeddings']
word_details_embeddings = global_vars['word_details_embeddings']
render_ui(CUSTOM_CSS)
menu_options = {
"Storyteller": lambda: main_application(global_vars),
"Lnu-AI Chat": lambda: chatbot_application(models, trained_data, local_data_files, tts_settings),
"Examine Words": explore_words,
"Language Preservation": display_mikmaq_resources,
}
selected_option = render_menu(list(menu_options.keys()))
if selected_option and menu_options.get(selected_option):
menu_options[selected_option]()
def render_ui(CUSTOM_CSS):
"""
Renders the user interface components.
"""
st.markdown(CUSTOM_CSS, unsafe_allow_html=True)
st.markdown('<h1 style="text-align: center; color: Crimson; margin-top: -70px;">Lnu-AI</h1>', unsafe_allow_html=True)
st.markdown('<h3 style="text-align: center;">An Indigenous AI System</h3>', unsafe_allow_html=True)
st.divider()
def render_menu(options: list) -> str:
"""
Renders the menu with options.
Args:
options (list): A list of options to be displayed in the menu.
Returns:
str: The selected option from the menu.
"""
icons = ["book", "chat", "puzzle fill", "archive"]
return option_menu(None, options, icons=icons, menu_icon="cast", default_index=0, orientation="horizontal")
if __name__ == "__main__":
main()
#wela'lin | [
"{'model': 'image-alpha-001', 'prompt': PLACEHOLDER}",
"You are Lnu-AI, an AI developed to promote and preserve the Mi'kmaq language and culture.",
"PLACEHOLDER PLACEHOLDER",
"Let us embark on a journey with a theme of 'PLACEHOLDER', centered around the word 'PLACEHOLDER'.",
"[PLACEHOLDER, PLACEHOLDER]",
"{'role': 'system', 'content': \"You are a Mi'kmaq storyteller, or an 'a'tugwewinu', skilled in weaving captivating tales about the Mi'kmaq people and their rich cultural heritage.\"}",
"You are a Mi'kmaq storyteller, or an 'a'tugwewinu', skilled in weaving captivating tales about the Mi'kmaq people and their rich cultural heritage.",
"{'prompt': PLACEHOLDER, 'completion': PLACEHOLDER}"
] |
2024-01-10 | kaixindelele/gpt_academic_bk | request_llm~bridge_claude.py | # 借鉴了 https://github.com/GaiZhenbiao/ChuanhuChatGPT 项目
"""
This file mainly contains two functions.
Function without multi-threading capability:
1. predict: used for normal conversations; has full interactive features, but must not be called from multiple threads.
Function that can be called from multiple threads:
2. predict_no_ui_long_connection: when processing long documents the connection tends to drop; this function works around that by streaming, and it also supports multi-threaded use.
"""
import os
import json
import time
import gradio as gr
import logging
import traceback
import requests
import importlib
# config_private.py holds private secrets such as API keys and proxy URLs
# When loading the config, a private config_private file (not tracked by git) takes precedence over the original config file if it exists
from toolbox import get_conf, update_ui, trimmed_format_exc, ProxyNetworkActivate
proxies, TIMEOUT_SECONDS, MAX_RETRY, ANTHROPIC_API_KEY = \
get_conf('proxies', 'TIMEOUT_SECONDS', 'MAX_RETRY', 'ANTHROPIC_API_KEY')
timeout_bot_msg = '[Local Message] Request timeout. Network error. Please check proxy settings in config.py.' + \
'网络错误,检查代理服务器是否可用,以及代理设置的格式是否正确,格式须是[协议]://[地址]:[端口],缺一不可。'
def get_full_error(chunk, stream_response):
"""
Collect the complete error message returned by the streaming endpoint.
"""
while True:
try:
chunk += next(stream_response)
except:
break
return chunk
def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="", observe_window=None, console_slience=False):
"""
Send the request and wait for the full reply in one shot, without showing intermediate output; internally a stream is used so the connection does not get cut off midway.
inputs:
the input of this query
sys_prompt:
the silent system prompt
llm_kwargs:
internal tuning parameters for the model
history:
the list of previous dialogue turns
observe_window = None:
used to pass the partial output across threads; mostly a visual nicety and can be left empty. observe_window[0]: observation window. observe_window[1]: watchdog timestamp
"""
from anthropic import Anthropic
watch_dog_patience = 15 # watchdog patience, in seconds
prompt = generate_payload(inputs, llm_kwargs, history, system_prompt=sys_prompt, stream=True)
retry = 0
if len(ANTHROPIC_API_KEY) == 0:
raise RuntimeError("没有设置ANTHROPIC_API_KEY选项")
while True:
try:
# create a streaming completion via the Anthropic SDK
from .bridge_all import model_info
anthropic = Anthropic(api_key=ANTHROPIC_API_KEY)
# endpoint = model_info[llm_kwargs['llm_model']]['endpoint']
# with ProxyNetworkActivate()
stream = anthropic.completions.create(
prompt=prompt,
max_tokens_to_sample=4096, # The maximum number of tokens to generate before stopping.
model=llm_kwargs['llm_model'],
stream=True,
temperature = llm_kwargs['temperature']
)
break
except Exception as e:
retry += 1
traceback.print_exc()
if retry > MAX_RETRY: raise TimeoutError
if MAX_RETRY!=0: print(f'请求超时,正在重试 ({retry}/{MAX_RETRY}) ……')
result = ''
try:
for completion in stream:
result += completion.completion
if not console_slience: print(completion.completion, end='')
if observe_window is not None:
# observation window: expose the data received so far
if len(observe_window) >= 1: observe_window[0] += completion.completion
# watchdog: terminate if the watchdog has not been fed within the patience window
if len(observe_window) >= 2:
if (time.time()-observe_window[1]) > watch_dog_patience:
raise RuntimeError("用户取消了程序。")
except Exception as e:
traceback.print_exc()
return result
def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_prompt='', stream = True, additional_fn=None):
"""
Send the request and fetch the output as a stream.
Used for the basic conversation feature.
inputs is the input of this query
top_p, temperature are internal tuning parameters of the model
history is the list of previous dialogue turns (note: if either inputs or history is too long, a token-overflow error will be triggered)
chatbot is the conversation list shown in the WebUI; modify it and yield to update the dialogue shown on screen
additional_fn indicates which button was clicked; see functional.py for the buttons
"""
from anthropic import Anthropic
if len(ANTHROPIC_API_KEY) == 0:
chatbot.append((inputs, "没有设置ANTHROPIC_API_KEY"))
yield from update_ui(chatbot=chatbot, history=history, msg="等待响应") # 刷新界面
return
if additional_fn is not None:
from core_functional import handle_core_functionality
inputs, history = handle_core_functionality(additional_fn, inputs, history, chatbot)
raw_input = inputs
logging.info(f'[raw_input] {raw_input}')
chatbot.append((inputs, ""))
yield from update_ui(chatbot=chatbot, history=history, msg="等待响应") # 刷新界面
try:
prompt = generate_payload(inputs, llm_kwargs, history, system_prompt, stream)
except RuntimeError as e:
chatbot[-1] = (inputs, f"您提供的api-key不满足要求,不包含任何可用于{llm_kwargs['llm_model']}的api-key。您可能选择了错误的模型或请求源。")
yield from update_ui(chatbot=chatbot, history=history, msg="api-key不满足要求") # 刷新界面
return
history.append(inputs); history.append("")
retry = 0
while True:
try:
# create a streaming completion via the Anthropic SDK
from .bridge_all import model_info
anthropic = Anthropic(api_key=ANTHROPIC_API_KEY)
# endpoint = model_info[llm_kwargs['llm_model']]['endpoint']
# with ProxyNetworkActivate()
stream = anthropic.completions.create(
prompt=prompt,
max_tokens_to_sample=4096, # The maximum number of tokens to generate before stopping.
model=llm_kwargs['llm_model'],
stream=True,
temperature = llm_kwargs['temperature']
)
break
except:
retry += 1
chatbot[-1] = (chatbot[-1][0], timeout_bot_msg)
retry_msg = f",正在重试 ({retry}/{MAX_RETRY}) ……" if MAX_RETRY > 0 else ""
yield from update_ui(chatbot=chatbot, history=history, msg="请求超时"+retry_msg) # 刷新界面
if retry > MAX_RETRY: raise TimeoutError
gpt_replying_buffer = ""
for completion in stream:
try:
gpt_replying_buffer = gpt_replying_buffer + completion.completion
history[-1] = gpt_replying_buffer
chatbot[-1] = (history[-2], history[-1])
yield from update_ui(chatbot=chatbot, history=history, msg='正常') # 刷新界面
except Exception as e:
from toolbox import regular_txt_to_markdown
tb_str = '```\n' + trimmed_format_exc() + '```'
chatbot[-1] = (chatbot[-1][0], f"[Local Message] 异常 \n\n{tb_str}")
yield from update_ui(chatbot=chatbot, history=history, msg="Json异常" + tb_str) # 刷新界面
return
# https://github.com/jtsang4/claude-to-chatgpt/blob/main/claude_to_chatgpt/adapter.py
def convert_messages_to_prompt(messages):
prompt = ""
role_map = {
"system": "Human",
"user": "Human",
"assistant": "Assistant",
}
for message in messages:
role = message["role"]
content = message["content"]
transformed_role = role_map[role]
prompt += f"\n\n{transformed_role.capitalize()}: {content}"
prompt += "\n\nAssistant: "
return prompt
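# Illustrative example (hedged sketch): OpenAI-style messages are flattened into the single
# Human/Assistant prompt string that the Anthropic completions API expects:
#   convert_messages_to_prompt([
#       {"role": "system", "content": "You are helpful."},
#       {"role": "user", "content": "Hi"},
#   ])
#   # -> "\n\nHuman: You are helpful.\n\nHuman: Hi\n\nAssistant: "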
def generate_payload(inputs, llm_kwargs, history, system_prompt, stream):
"""
Combine the system prompt, history and the new input into a single Anthropic-style prompt string, ready to be sent.
"""
from anthropic import Anthropic, HUMAN_PROMPT, AI_PROMPT
conversation_cnt = len(history) // 2
messages = [{"role": "system", "content": system_prompt}]
if conversation_cnt:
for index in range(0, 2*conversation_cnt, 2):
what_i_have_asked = {}
what_i_have_asked["role"] = "user"
what_i_have_asked["content"] = history[index]
what_gpt_answer = {}
what_gpt_answer["role"] = "assistant"
what_gpt_answer["content"] = history[index+1]
if what_i_have_asked["content"] != "":
if what_gpt_answer["content"] == "": continue
if what_gpt_answer["content"] == timeout_bot_msg: continue
messages.append(what_i_have_asked)
messages.append(what_gpt_answer)
else:
messages[-1]['content'] = what_gpt_answer['content']
what_i_ask_now = {}
what_i_ask_now["role"] = "user"
what_i_ask_now["content"] = inputs
messages.append(what_i_ask_now)
prompt = convert_messages_to_prompt(messages)
return prompt
| [
"\n\nAssistant: "
] |
2024-01-10 | kaixindelele/gpt_academic_bk | crazy_functions~crazy_utils.py | from toolbox import update_ui, get_conf, trimmed_format_exc, get_log_folder
import threading
import os
def input_clipping(inputs, history, max_token_limit):
import numpy as np
from request_llm.bridge_all import model_info
enc = model_info["gpt-3.5-turbo"]['tokenizer']
def get_token_num(txt): return len(enc.encode(txt, disallowed_special=()))
mode = 'input-and-history'
# when the input takes less than half of the token budget, only clip the history
input_token_num = get_token_num(inputs)
if input_token_num < max_token_limit//2:
mode = 'only-history'
max_token_limit = max_token_limit - input_token_num
everything = [inputs] if mode == 'input-and-history' else ['']
everything.extend(history)
n_token = get_token_num('\n'.join(everything))
everything_token = [get_token_num(e) for e in everything]
delta = max(everything_token) // 16 # granularity of each truncation step
while n_token > max_token_limit:
where = np.argmax(everything_token)
encoded = enc.encode(everything[where], disallowed_special=())
clipped_encoded = encoded[:len(encoded)-delta]
everything[where] = enc.decode(clipped_encoded)[:-1] # drop the last char, which may be a broken character after truncation
everything_token[where] = get_token_num(everything[where])
n_token = get_token_num('\n'.join(everything))
if mode == 'input-and-history':
inputs = everything[0]
history = everything[1:]
return inputs, history
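# Illustrative usage (hedged sketch; actual token counts depend on the gpt-3.5-turbo tokenizer):
#   inputs, history = input_clipping("short question", ["a very long previous answer ..."], max_token_limit=4096)
# When the inputs use less than half of the budget, only the history entries are clipped;
# otherwise both the inputs and the history may be truncated.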
def request_gpt_model_in_new_thread_with_ui_alive(
inputs, inputs_show_user, llm_kwargs,
chatbot, history, sys_prompt, refresh_interval=1,
handle_token_exceed=True,
retry_times_at_unknown_error=2,
):
"""
Request GPT model,请求GPT模型同时维持用户界面活跃。
输入参数 Args (以_array结尾的输入变量都是列表,列表长度为子任务的数量,执行时,会把列表拆解,放到每个子线程中分别执行):
inputs (string): List of inputs (输入)
inputs_show_user (string): List of inputs to show user(展现在报告中的输入,借助此参数,在汇总报告中隐藏啰嗦的真实输入,增强报告的可读性)
top_p (float): Top p value for sampling from model distribution (GPT参数,浮点数)
temperature (float): Temperature value for sampling from model distribution(GPT参数,浮点数)
chatbot: chatbot inputs and outputs (用户界面对话窗口句柄,用于数据流可视化)
history (list): List of chat history (历史,对话历史列表)
sys_prompt (string): List of system prompts (系统输入,列表,用于输入给GPT的前提提示,比如你是翻译官怎样怎样)
refresh_interval (float, optional): Refresh interval for UI (default: 0.2) (刷新时间间隔频率,建议低于1,不可高于3,仅仅服务于视觉效果)
handle_token_exceed:是否自动处理token溢出的情况,如果选择自动处理,则会在溢出时暴力截断,默认开启
retry_times_at_unknown_error:失败时的重试次数
输出 Returns:
future: 输出,GPT返回的结果
"""
import time
from concurrent.futures import ThreadPoolExecutor
from request_llm.bridge_all import predict_no_ui_long_connection
# 用户反馈
chatbot.append([inputs_show_user, ""])
yield from update_ui(chatbot=chatbot, history=[]) # 刷新界面
executor = ThreadPoolExecutor(max_workers=16)
mutable = ["", time.time(), ""]
# 看门狗耐心
watch_dog_patience = 30
# 请求任务
def _req_gpt(inputs, history, sys_prompt):
retry_op = retry_times_at_unknown_error
exceeded_cnt = 0
while True:
# watchdog error
if len(mutable) >= 2 and (time.time()-mutable[1]) > watch_dog_patience:
raise RuntimeError("检测到程序终止。")
try:
# 【第一种情况】:顺利完成
result = predict_no_ui_long_connection(
inputs=inputs, llm_kwargs=llm_kwargs,
history=history, sys_prompt=sys_prompt, observe_window=mutable)
return result
except ConnectionAbortedError as token_exceeded_error:
# 【第二种情况】:Token溢出
if handle_token_exceed:
exceeded_cnt += 1
# 【选择处理】 尝试计算比例,尽可能多地保留文本
from toolbox import get_reduce_token_percent
p_ratio, n_exceed = get_reduce_token_percent(str(token_exceeded_error))
MAX_TOKEN = 4096
EXCEED_ALLO = 512 + 512 * exceeded_cnt
inputs, history = input_clipping(inputs, history, max_token_limit=MAX_TOKEN-EXCEED_ALLO)
mutable[0] += f'[Local Message] 警告,文本过长将进行截断,Token溢出数:{n_exceed}。\n\n'
continue # 返回重试
else:
# 【选择放弃】
tb_str = '```\n' + trimmed_format_exc() + '```'
mutable[0] += f"[Local Message] 警告,在执行过程中遭遇问题, Traceback:\n\n{tb_str}\n\n"
return mutable[0] # 放弃
except:
# 【第三种情况】:其他错误:重试几次
tb_str = '```\n' + trimmed_format_exc() + '```'
print(tb_str)
mutable[0] += f"[Local Message] 警告,在执行过程中遭遇问题, Traceback:\n\n{tb_str}\n\n"
if retry_op > 0:
retry_op -= 1
mutable[0] += f"[Local Message] 重试中,请稍等 {retry_times_at_unknown_error-retry_op}/{retry_times_at_unknown_error}:\n\n"
if ("Rate limit reached" in tb_str) or ("Too Many Requests" in tb_str):
time.sleep(30)
time.sleep(5)
continue # 返回重试
else:
time.sleep(5)
return mutable[0] # 放弃
# 提交任务
future = executor.submit(_req_gpt, inputs, history, sys_prompt)
while True:
# yield一次以刷新前端页面
time.sleep(refresh_interval)
# “喂狗”(看门狗)
mutable[1] = time.time()
if future.done():
break
chatbot[-1] = [chatbot[-1][0], mutable[0]]
yield from update_ui(chatbot=chatbot, history=[]) # 刷新界面
final_result = future.result()
chatbot[-1] = [chatbot[-1][0], final_result]
yield from update_ui(chatbot=chatbot, history=[]) # 如果最后成功了,则删除报错信息
return final_result
def can_multi_process(llm):
if llm.startswith('gpt-'): return True
if llm.startswith('api2d-'): return True
if llm.startswith('azure-'): return True
return False
def request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency(
inputs_array, inputs_show_user_array, llm_kwargs,
chatbot, history_array, sys_prompt_array,
refresh_interval=1, max_workers=-1, scroller_max_len=30,
handle_token_exceed=True, show_user_at_complete=False,
retry_times_at_unknown_error=2,
):
"""
Request GPT model using multiple threads with UI and high efficiency
请求GPT模型的[多线程]版。
具备以下功能:
实时在UI上反馈远程数据流
使用线程池,可调节线程池的大小避免openai的流量限制错误
处理中途中止的情况
网络等出问题时,会把traceback和已经接收的数据转入输出
输入参数 Args (以_array结尾的输入变量都是列表,列表长度为子任务的数量,执行时,会把列表拆解,放到每个子线程中分别执行):
inputs_array (list): List of inputs (每个子任务的输入)
inputs_show_user_array (list): List of inputs to show user(每个子任务展现在报告中的输入,借助此参数,在汇总报告中隐藏啰嗦的真实输入,增强报告的可读性)
llm_kwargs: llm_kwargs参数
chatbot: chatbot (用户界面对话窗口句柄,用于数据流可视化)
history_array (list): List of chat history (历史对话输入,双层列表,第一层列表是子任务分解,第二层列表是对话历史)
sys_prompt_array (list): List of system prompts (系统输入,列表,用于输入给GPT的前提提示,比如你是翻译官怎样怎样)
refresh_interval (float, optional): Refresh interval for UI (default: 0.2) (刷新时间间隔频率,建议低于1,不可高于3,仅仅服务于视觉效果)
max_workers (int, optional): Maximum number of threads (default: see config.py) (最大线程数,如果子任务非常多,需要用此选项防止高频地请求openai导致错误)
scroller_max_len (int, optional): Maximum length for scroller (default: 30)(数据流的显示最后收到的多少个字符,仅仅服务于视觉效果)
handle_token_exceed (bool, optional): (是否在输入过长时,自动缩减文本)
handle_token_exceed:是否自动处理token溢出的情况,如果选择自动处理,则会在溢出时暴力截断,默认开启
show_user_at_complete (bool, optional): (在结束时,把完整输入-输出结果显示在聊天框)
retry_times_at_unknown_error:子任务失败时的重试次数
输出 Returns:
list: List of GPT model responses (每个子任务的输出汇总,如果某个子任务出错,response中会携带traceback报错信息,方便调试和定位问题。)
"""
import time, random
from concurrent.futures import ThreadPoolExecutor
from request_llm.bridge_all import predict_no_ui_long_connection
assert len(inputs_array) == len(history_array)
assert len(inputs_array) == len(sys_prompt_array)
if max_workers == -1: # 读取配置文件
try: max_workers, = get_conf('DEFAULT_WORKER_NUM')
except: max_workers = 8
if max_workers <= 0: max_workers = 3
# 屏蔽掉 chatglm的多线程,可能会导致严重卡顿
if not can_multi_process(llm_kwargs['llm_model']):
max_workers = 1
executor = ThreadPoolExecutor(max_workers=max_workers)
n_frag = len(inputs_array)
# 用户反馈
chatbot.append(["请开始多线程操作。", ""])
yield from update_ui(chatbot=chatbot, history=[]) # 刷新界面
# 跨线程传递
mutable = [["", time.time(), "等待中"] for _ in range(n_frag)]
# 看门狗耐心
watch_dog_patience = 30
# 子线程任务
def _req_gpt(index, inputs, history, sys_prompt):
gpt_say = ""
retry_op = retry_times_at_unknown_error
exceeded_cnt = 0
mutable[index][2] = "执行中"
while True:
# watchdog error
if len(mutable[index]) >= 2 and (time.time()-mutable[index][1]) > watch_dog_patience:
mutable[index][2] = "与服务器断开连接"
raise RuntimeError("检测到程序终止。")
try:
# 【第一种情况】:顺利完成
# time.sleep(10); raise RuntimeError("测试")
gpt_say = predict_no_ui_long_connection(
inputs=inputs, llm_kwargs=llm_kwargs, history=history,
sys_prompt=sys_prompt, observe_window=mutable[index], console_slience=True
)
mutable[index][2] = "已成功"
return gpt_say
except ConnectionAbortedError as token_exceeded_error:
# 【第二种情况】:Token溢出,
if handle_token_exceed:
exceeded_cnt += 1
# 【选择处理】 尝试计算比例,尽可能多地保留文本
from toolbox import get_reduce_token_percent
p_ratio, n_exceed = get_reduce_token_percent(str(token_exceeded_error))
MAX_TOKEN = 4096
EXCEED_ALLO = 512 + 512 * exceeded_cnt
inputs, history = input_clipping(inputs, history, max_token_limit=MAX_TOKEN-EXCEED_ALLO)
gpt_say += f'[Local Message] 警告,文本过长将进行截断,Token溢出数:{n_exceed}。\n\n'
mutable[index][2] = f"截断重试"
continue # 返回重试
else:
# 【选择放弃】
tb_str = '```\n' + trimmed_format_exc() + '```'
gpt_say += f"[Local Message] 警告,线程{index}在执行过程中遭遇问题, Traceback:\n\n{tb_str}\n\n"
if len(mutable[index][0]) > 0: gpt_say += "此线程失败前收到的回答:\n\n" + mutable[index][0]
mutable[index][2] = "输入过长已放弃"
return gpt_say # 放弃
except:
# 【第三种情况】:其他错误
tb_str = '```\n' + trimmed_format_exc() + '```'
print(tb_str)
gpt_say += f"[Local Message] 警告,线程{index}在执行过程中遭遇问题, Traceback:\n\n{tb_str}\n\n"
if len(mutable[index][0]) > 0: gpt_say += "此线程失败前收到的回答:\n\n" + mutable[index][0]
if retry_op > 0:
retry_op -= 1
wait = random.randint(5, 20)
if ("Rate limit reached" in tb_str) or ("Too Many Requests" in tb_str):
wait = wait * 3
fail_info = "OpenAI绑定信用卡可解除频率限制 "
else:
fail_info = ""
# 也许等待十几秒后,情况会好转
for i in range(wait):
mutable[index][2] = f"{fail_info}等待重试 {wait-i}"; time.sleep(1)
# 开始重试
mutable[index][2] = f"重试中 {retry_times_at_unknown_error-retry_op}/{retry_times_at_unknown_error}"
continue # 返回重试
else:
mutable[index][2] = "已失败"
wait = 5
time.sleep(5)
return gpt_say # 放弃
# 异步任务开始
futures = [executor.submit(_req_gpt, index, inputs, history, sys_prompt) for index, inputs, history, sys_prompt in zip(
range(len(inputs_array)), inputs_array, history_array, sys_prompt_array)]
cnt = 0
while True:
# yield一次以刷新前端页面
time.sleep(refresh_interval)
cnt += 1
worker_done = [h.done() for h in futures]
# 更好的UI视觉效果
observe_win = []
# 每个线程都要“喂狗”(看门狗)
for thread_index, _ in enumerate(worker_done):
mutable[thread_index][1] = time.time()
# 在前端打印些好玩的东西
for thread_index, _ in enumerate(worker_done):
print_something_really_funny = "[ ...`"+mutable[thread_index][0][-scroller_max_len:].\
replace('\n', '').replace('`', '.').replace(
' ', '.').replace('<br/>', '.....').replace('$', '.')+"`... ]"
observe_win.append(print_something_really_funny)
# 在前端打印些好玩的东西
stat_str = ''.join([f'`{mutable[thread_index][2]}`: {obs}\n\n'
if not done else f'`{mutable[thread_index][2]}`\n\n'
for thread_index, done, obs in zip(range(len(worker_done)), worker_done, observe_win)])
# 在前端打印些好玩的东西
chatbot[-1] = [chatbot[-1][0], f'多线程操作已经开始,完成情况: \n\n{stat_str}' + ''.join(['.']*(cnt % 10+1))]
yield from update_ui(chatbot=chatbot, history=[]) # 刷新界面
if all(worker_done):
executor.shutdown()
break
# 异步任务结束
gpt_response_collection = []
for inputs_show_user, f in zip(inputs_show_user_array, futures):
gpt_res = f.result()
gpt_response_collection.extend([inputs_show_user, gpt_res])
# 是否在结束时,在界面上显示结果
if show_user_at_complete:
for inputs_show_user, f in zip(inputs_show_user_array, futures):
gpt_res = f.result()
chatbot.append([inputs_show_user, gpt_res])
yield from update_ui(chatbot=chatbot, history=[]) # 刷新界面
time.sleep(1)
return gpt_response_collection
def breakdown_txt_to_satisfy_token_limit(txt, get_token_fn, limit):
def cut(txt_tocut, must_break_at_empty_line): # 递归
if get_token_fn(txt_tocut) <= limit:
return [txt_tocut]
else:
lines = txt_tocut.split('\n')
estimated_line_cut = limit / get_token_fn(txt_tocut) * len(lines)
estimated_line_cut = int(estimated_line_cut)
for cnt in reversed(range(estimated_line_cut)):
if must_break_at_empty_line:
if lines[cnt] != "":
continue
print(cnt)
prev = "\n".join(lines[:cnt])
post = "\n".join(lines[cnt:])
if get_token_fn(prev) < limit:
break
if cnt == 0:
raise RuntimeError("存在一行极长的文本!")
# print(len(post))
# 列表递归接龙
result = [prev]
result.extend(cut(post, must_break_at_empty_line))
return result
try:
return cut(txt, must_break_at_empty_line=True)
except RuntimeError:
return cut(txt, must_break_at_empty_line=False)
def force_breakdown(txt, limit, get_token_fn):
"""
When the text cannot be split on punctuation or blank lines, fall back to a brute-force cut at the token limit.
"""
for i in reversed(range(len(txt))):
if get_token_fn(txt[:i]) < limit:
return txt[:i], txt[i:]
return "Tiktoken未知错误", "Tiktoken未知错误"
def breakdown_txt_to_satisfy_token_limit_for_pdf(txt, get_token_fn, limit):
# 递归
def cut(txt_tocut, must_break_at_empty_line, break_anyway=False):
if get_token_fn(txt_tocut) <= limit:
return [txt_tocut]
else:
lines = txt_tocut.split('\n')
estimated_line_cut = limit / get_token_fn(txt_tocut) * len(lines)
estimated_line_cut = int(estimated_line_cut)
cnt = 0
for cnt in reversed(range(estimated_line_cut)):
if must_break_at_empty_line:
if lines[cnt] != "":
continue
prev = "\n".join(lines[:cnt])
post = "\n".join(lines[cnt:])
if get_token_fn(prev) < limit:
break
if cnt == 0:
if break_anyway:
prev, post = force_breakdown(txt_tocut, limit, get_token_fn)
else:
raise RuntimeError(f"存在一行极长的文本!{txt_tocut}")
# print(len(post))
# 列表递归接龙
result = [prev]
result.extend(cut(post, must_break_at_empty_line, break_anyway=break_anyway))
return result
try:
# 1st attempt: split on double blank lines (\n\n)
return cut(txt, must_break_at_empty_line=True)
except RuntimeError:
try:
# 2nd attempt: split on single blank lines (\n)
return cut(txt, must_break_at_empty_line=False)
except RuntimeError:
try:
# 3rd attempt: split on English periods (.)
res = cut(txt.replace('.', '。\n'), must_break_at_empty_line=False) # the Chinese full stop here is intentional; it serves as a temporary marker
return [r.replace('。\n', '.') for r in res]
except RuntimeError as e:
try:
# 4th attempt: split on Chinese full stops (。)
res = cut(txt.replace('。', '。。\n'), must_break_at_empty_line=False)
return [r.replace('。。\n', '。') for r in res]
except RuntimeError as e:
# 5th attempt: give up and cut wherever necessary
return cut(txt, must_break_at_empty_line=False, break_anyway=True)
def read_and_clean_pdf_text(fp):
"""
This function splits a pdf; it relies on many tricks, the logic is somewhat messy, but the results are surprisingly good.
**Input**
- `fp`: path of the pdf file whose text needs to be read and cleaned
**Output**
- `meta_txt`: the cleaned text content as a string
- `page_one_meta`: list of cleaned text blocks from the first page
**What it does**
Reads the pdf file and cleans its text content. The cleaning rules include:
- extract the text of every block element and merge it into one string
- drop short blocks (fewer than 100 characters) and replace them with newlines
- remove redundant blank lines
- merge paragraph blocks that start with a lowercase letter, joining them with spaces
- remove duplicated newlines
- replace every newline with two newlines so that paragraphs are separated by blank lines
"""
import fitz, copy
import re
import numpy as np
from colorful import print亮黄, print亮绿
fc = 0 # index 0: text
fs = 1 # index 1: font size
fb = 2 # index 2: bounding box
REMOVE_FOOT_NOTE = True # whether to drop content that is not body text (smaller font than the body, e.g. references, footnotes, figure captions)
REMOVE_FOOT_FFSIZE_PERCENT = 0.95 # treat text as non-body when its font size falls below this fraction of the body size (body font size is not always perfectly uniform)
def primary_ffsize(l):
"""
Return the dominant font size of a text line (weighted by character count).
"""
fsize_statiscs = {}
for wtf in l['spans']:
if wtf['size'] not in fsize_statiscs: fsize_statiscs[wtf['size']] = 0
fsize_statiscs[wtf['size']] += len(wtf['text'])
return max(fsize_statiscs, key=fsize_statiscs.get)
def ffsize_same(a,b):
"""
Check whether two font sizes are approximately equal.
"""
return abs((a-b)/max(a,b)) < 0.02
with fitz.open(fp) as doc:
meta_txt = []
meta_font = []
meta_line = []
meta_span = []
############################## <第 1 步,搜集初始信息> ##################################
for index, page in enumerate(doc):
# file_content += page.get_text()
text_areas = page.get_text("dict") # 获取页面上的文本信息
for t in text_areas['blocks']:
if 'lines' in t:
pf = 998
for l in t['lines']:
txt_line = "".join([wtf['text'] for wtf in l['spans']])
if len(txt_line) == 0: continue
pf = primary_ffsize(l)
meta_line.append([txt_line, pf, l['bbox'], l])
for wtf in l['spans']: # for l in t['lines']:
meta_span.append([wtf['text'], wtf['size'], len(wtf['text'])])
# meta_line.append(["NEW_BLOCK", pf])
# 块元提取 for each word segment with in line for each line cross-line words for each block
meta_txt.extend([" ".join(["".join([wtf['text'] for wtf in l['spans']]) for l in t['lines']]).replace(
'- ', '') for t in text_areas['blocks'] if 'lines' in t])
meta_font.extend([np.mean([np.mean([wtf['size'] for wtf in l['spans']])
for l in t['lines']]) for t in text_areas['blocks'] if 'lines' in t])
if index == 0:
page_one_meta = [" ".join(["".join([wtf['text'] for wtf in l['spans']]) for l in t['lines']]).replace(
'- ', '') for t in text_areas['blocks'] if 'lines' in t]
############################## <第 2 步,获取正文主字体> ##################################
try:
fsize_statiscs = {}
for span in meta_span:
if span[1] not in fsize_statiscs: fsize_statiscs[span[1]] = 0
fsize_statiscs[span[1]] += span[2]
main_fsize = max(fsize_statiscs, key=fsize_statiscs.get)
if REMOVE_FOOT_NOTE:
give_up_fize_threshold = main_fsize * REMOVE_FOOT_FFSIZE_PERCENT
except:
raise RuntimeError(f'抱歉, 我们暂时无法解析此PDF文档: {fp}。')
############################## <第 3 步,切分和重新整合> ##################################
mega_sec = []
sec = []
for index, line in enumerate(meta_line):
if index == 0:
sec.append(line[fc])
continue
if REMOVE_FOOT_NOTE:
if meta_line[index][fs] <= give_up_fize_threshold:
continue
if ffsize_same(meta_line[index][fs], meta_line[index-1][fs]):
# 尝试识别段落
if meta_line[index][fc].endswith('.') and\
(meta_line[index-1][fc] != 'NEW_BLOCK') and \
(meta_line[index][fb][2] - meta_line[index][fb][0]) < (meta_line[index-1][fb][2] - meta_line[index-1][fb][0]) * 0.7:
sec[-1] += line[fc]
sec[-1] += "\n\n"
else:
sec[-1] += " "
sec[-1] += line[fc]
else:
if (index+1 < len(meta_line)) and \
meta_line[index][fs] > main_fsize:
# 单行 + 字体大
mega_sec.append(copy.deepcopy(sec))
sec = []
sec.append("# " + line[fc])
else:
# 尝试识别section
if meta_line[index-1][fs] > meta_line[index][fs]:
sec.append("\n" + line[fc])
else:
sec.append(line[fc])
mega_sec.append(copy.deepcopy(sec))
finals = []
for ms in mega_sec:
final = " ".join(ms)
final = final.replace('- ', ' ')
finals.append(final)
meta_txt = finals
############################## <第 4 步,乱七八糟的后处理> ##################################
def 把字符太少的块清除为回车(meta_txt):
for index, block_txt in enumerate(meta_txt):
if len(block_txt) < 100:
meta_txt[index] = '\n'
return meta_txt
meta_txt = 把字符太少的块清除为回车(meta_txt)
def 清理多余的空行(meta_txt):
for index in reversed(range(1, len(meta_txt))):
if meta_txt[index] == '\n' and meta_txt[index-1] == '\n':
meta_txt.pop(index)
return meta_txt
meta_txt = 清理多余的空行(meta_txt)
def 合并小写开头的段落块(meta_txt):
def starts_with_lowercase_word(s):
pattern = r"^[a-z]+"
match = re.match(pattern, s)
if match:
return True
else:
return False
for _ in range(100):
for index, block_txt in enumerate(meta_txt):
if starts_with_lowercase_word(block_txt):
if meta_txt[index-1] != '\n':
meta_txt[index-1] += ' '
else:
meta_txt[index-1] = ''
meta_txt[index-1] += meta_txt[index]
meta_txt[index] = '\n'
return meta_txt
meta_txt = 合并小写开头的段落块(meta_txt)
meta_txt = 清理多余的空行(meta_txt)
meta_txt = '\n'.join(meta_txt)
# 清除重复的换行
for _ in range(5):
meta_txt = meta_txt.replace('\n\n', '\n')
# 换行 -> 双换行
meta_txt = meta_txt.replace('\n', '\n\n')
############################## <第 5 步,展示分割效果> ##################################
# for f in finals:
# print亮黄(f)
# print亮绿('***************************')
return meta_txt, page_one_meta
def get_files_from_everything(txt, type): # type='.md'
"""
This function collects every file of the given type (e.g. .md) under a directory; it can also fetch a single file from the web.
Parameters
- txt: a path or URL: the file or folder to search, or a file on the web.
- type: string, the file extension to search for. Defaults to .md.
Returns
- success: bool, whether the function succeeded.
- file_manifest: list of absolute paths of all files with the given extension.
- project_folder: string, the folder containing the files; for a web file this is the temporary download folder.
"""
import glob, os
success = True
if txt.startswith('http'):
# 网络的远程文件
import requests
from toolbox import get_conf
from toolbox import get_log_folder, gen_time_str
proxies, = get_conf('proxies')
try:
r = requests.get(txt, proxies=proxies)
except:
raise ConnectionRefusedError(f"无法下载资源{txt},请检查。")
path = os.path.join(get_log_folder(plugin_name='web_download'), gen_time_str()+type)
with open(path, 'wb+') as f: f.write(r.content)
project_folder = get_log_folder(plugin_name='web_download')
file_manifest = [path]
elif txt.endswith(type):
# 直接给定文件
file_manifest = [txt]
project_folder = os.path.dirname(txt)
elif os.path.exists(txt):
# 本地路径,递归搜索
project_folder = txt
file_manifest = [f for f in glob.glob(f'{project_folder}/**/*'+type, recursive=True)]
if len(file_manifest) == 0:
success = False
else:
project_folder = None
file_manifest = []
success = False
return success, file_manifest, project_folder
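# Illustrative usage (hedged sketch):
#   success, file_manifest, project_folder = get_files_from_everything('./docs', type='.md')
# success is False when nothing matches; for an http(s) URL the file is downloaded into the
# log folder first and returned as a one-element manifest.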
def Singleton(cls):
_instance = {}
def _singleton(*args, **kargs):
if cls not in _instance:
_instance[cls] = cls(*args, **kargs)
return _instance[cls]
return _singleton
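# Illustrative usage (hedged sketch): the decorator caches one instance per class, so repeated
# constructions return the same object:
#   @Singleton
#   class Cache: pass
#   assert Cache() is Cache()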
@Singleton
class knowledge_archive_interface():
def __init__(self) -> None:
self.threadLock = threading.Lock()
self.current_id = ""
self.kai_path = None
self.qa_handle = None
self.text2vec_large_chinese = None
def get_chinese_text2vec(self):
if self.text2vec_large_chinese is None:
# < -------------------预热文本向量化模组--------------- >
from toolbox import ProxyNetworkActivate
print('Checking Text2vec ...')
from langchain.embeddings.huggingface import HuggingFaceEmbeddings
with ProxyNetworkActivate(): # 临时地激活代理网络
self.text2vec_large_chinese = HuggingFaceEmbeddings(model_name="GanymedeNil/text2vec-large-chinese")
return self.text2vec_large_chinese
def feed_archive(self, file_manifest, id="default"):
self.threadLock.acquire()
# import uuid
self.current_id = id
from zh_langchain import construct_vector_store
self.qa_handle, self.kai_path = construct_vector_store(
vs_id=self.current_id,
files=file_manifest,
sentence_size=100,
history=[],
one_conent="",
one_content_segmentation="",
text2vec = self.get_chinese_text2vec(),
)
self.threadLock.release()
def get_current_archive_id(self):
return self.current_id
def get_loaded_file(self):
return self.qa_handle.get_loaded_file()
def answer_with_archive_by_id(self, txt, id):
self.threadLock.acquire()
if not self.current_id == id:
self.current_id = id
from zh_langchain import construct_vector_store
self.qa_handle, self.kai_path = construct_vector_store(
vs_id=self.current_id,
files=[],
sentence_size=100,
history=[],
one_conent="",
one_content_segmentation="",
text2vec = self.get_chinese_text2vec(),
)
VECTOR_SEARCH_SCORE_THRESHOLD = 0
VECTOR_SEARCH_TOP_K = 4
CHUNK_SIZE = 512
resp, prompt = self.qa_handle.get_knowledge_based_conent_test(
query = txt,
vs_path = self.kai_path,
score_threshold=VECTOR_SEARCH_SCORE_THRESHOLD,
vector_search_top_k=VECTOR_SEARCH_TOP_K,
chunk_conent=True,
chunk_size=CHUNK_SIZE,
text2vec = self.get_chinese_text2vec(),
)
self.threadLock.release()
return resp, prompt
@Singleton
class nougat_interface():
def __init__(self):
self.threadLock = threading.Lock()
def nougat_with_timeout(self, command, cwd, timeout=3600):
import subprocess
process = subprocess.Popen(command, shell=True, cwd=cwd)
try:
stdout, stderr = process.communicate(timeout=timeout)
except subprocess.TimeoutExpired:
process.kill()
stdout, stderr = process.communicate()
print("Process timed out!")
return False
return True
def NOUGAT_parse_pdf(self, fp):
self.threadLock.acquire()
import glob, threading, os
from toolbox import get_log_folder, gen_time_str
dst = os.path.join(get_log_folder(plugin_name='nougat'), gen_time_str())
os.makedirs(dst)
self.nougat_with_timeout(f'nougat --out "{os.path.abspath(dst)}" "{os.path.abspath(fp)}"', os.getcwd())
res = glob.glob(os.path.join(dst,'*.mmd'))
if len(res) == 0:
raise RuntimeError("Nougat解析论文失败。")
self.threadLock.release()
return res[0]
def try_install_deps(deps, reload_m=[]):
import subprocess, sys, importlib
for dep in deps:
subprocess.check_call([sys.executable, '-m', 'pip', 'install', '--user', dep])
import site
importlib.reload(site)
for m in reload_m:
importlib.reload(__import__(m))
HTML_CSS = """
.row {
display: flex;
flex-wrap: wrap;
}
.column {
flex: 1;
padding: 10px;
}
.table-header {
font-weight: bold;
border-bottom: 1px solid black;
}
.table-row {
border-bottom: 1px solid lightgray;
}
.table-cell {
padding: 5px;
}
"""
TABLE_CSS = """
<div class="row table-row">
<div class="column table-cell">REPLACE_A</div>
<div class="column table-cell">REPLACE_B</div>
</div>
"""
class construct_html():
def __init__(self) -> None:
self.css = HTML_CSS
self.html_string = f'<!DOCTYPE html><head><meta charset="utf-8"><title>翻译结果</title><style>{self.css}</style></head>'
def add_row(self, a, b):
tmp = TABLE_CSS
from toolbox import markdown_convertion
tmp = tmp.replace('REPLACE_A', markdown_convertion(a))
tmp = tmp.replace('REPLACE_B', markdown_convertion(b))
self.html_string += tmp
def save_file(self, file_name):
with open(os.path.join(get_log_folder(), file_name), 'w', encoding='utf8') as f:
f.write(self.html_string.encode('utf-8', 'ignore').decode())
return os.path.join(get_log_folder(), file_name)
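# Illustrative usage (hedged sketch): build a two-column original/translation page and write it
# into the log folder:
#   page = construct_html()
#   page.add_row("# Title", "# 标题")
#   saved_path = page.save_file("translate_result.html")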
| [] |
2024-01-10 | kaixindelele/gpt_academic_bk | request_llm~bridge_chatgpt.py | # 借鉴了 https://github.com/GaiZhenbiao/ChuanhuChatGPT 项目
"""
This file mainly contains three functions.
Function without multi-threading capability:
1. predict: used for normal conversations; has full interactive features, but must not be called from multiple threads.
Functions that can be called from multiple threads:
2. predict_no_ui: called by advanced experimental feature modules; output is not shown in the UI in real time, the parameters are simple, and it can run in parallel threads, which makes complex feature logic easier to implement.
3. predict_no_ui_long_connection: when calling predict_no_ui on long documents, the connection to OpenAI tends to drop; this function works around that by streaming, and it also supports multi-threaded use.
"""
import json
import time
import gradio as gr
import logging
import traceback
import requests
import importlib
import openai
import pickle
import jieba
from flashtext import KeywordProcessor
# config_private.py holds private secrets such as API keys and proxy URLs
# When loading the config, a private config_private file (not tracked by git) takes precedence over the original config file if it exists
from toolbox import get_conf, update_ui, is_any_api_key, select_api_key, what_keys, clip_history, trimmed_format_exc
proxies, TIMEOUT_SECONDS, MAX_RETRY, API_ORG, RANDOM_LLM = \
get_conf('proxies', 'TIMEOUT_SECONDS', 'MAX_RETRY', 'API_ORG', "RANDOM_LLM")
timeout_bot_msg = '[Local Message] Request timeout. Network error. Please check proxy settings in config.py.' + \
'网络错误,检查代理服务器是否可用,以及代理设置的格式是否正确,格式须是[协议]://[地址]:[端口],缺一不可。'
def get_full_error(chunk, stream_response):
"""
Collect the complete error message returned by OpenAI.
"""
while True:
try:
chunk += next(stream_response)
except:
break
return chunk
def decode_chunk(chunk):
# peek at a few fields in advance (used to detect abnormal responses)
chunk_decoded = chunk.decode()
chunkjson = None
has_choices = False
has_content = False
has_role = False
try:
chunkjson = json.loads(chunk_decoded[6:])
has_choices = 'choices' in chunkjson
if has_choices: has_content = "content" in chunkjson['choices'][0]["delta"]
if has_choices: has_role = "role" in chunkjson['choices'][0]["delta"]
except:
pass
return chunk_decoded, chunkjson, has_choices, has_content, has_role
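# Illustrative example (hedged sketch): a typical SSE chunk from the chat completions endpoint
# looks like b'data: {"choices":[{"delta":{"content":"Hi"}}]}'. decode_chunk returns the decoded
# text, the parsed JSON (or None), and flags telling whether choices/content/role are present:
#   decode_chunk(b'data: {"choices":[{"delta":{"content":"Hi"}}]}')
#   # -> (decoded_str, parsed_json, True, True, False)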
def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="", observe_window=None, console_slience=False):
"""
Send the request to chatGPT and wait for the full reply in one shot, without showing intermediate output; internally a stream is used so the connection does not get cut off midway.
inputs:
the input of this query
sys_prompt:
the silent system prompt
llm_kwargs:
internal tuning parameters for chatGPT
history:
the list of previous dialogue turns
observe_window = None:
used to pass the partial output across threads; mostly a visual nicety and can be left empty. observe_window[0]: observation window. observe_window[1]: watchdog timestamp
"""
watch_dog_patience = 15 # watchdog patience, in seconds
headers, payload = generate_payload(inputs, llm_kwargs, history, system_prompt=sys_prompt, stream=True)
retry = 0
while True:
try:
# make a POST request to the API endpoint, stream=True
from .bridge_all import model_info
endpoint = model_info[llm_kwargs['llm_model']]['endpoint']
response = requests.post(endpoint, headers=headers, proxies=proxies,
json=payload, stream=True, timeout=TIMEOUT_SECONDS); break
except requests.exceptions.ReadTimeout as e:
retry += 1
traceback.print_exc()
if retry > MAX_RETRY: raise TimeoutError
if MAX_RETRY!=0: print(f'请求超时,正在重试 ({retry}/{MAX_RETRY}) ……')
stream_response = response.iter_lines()
result = ''
while True:
try: chunk = next(stream_response).decode()
except StopIteration:
break
except requests.exceptions.ConnectionError:
chunk = next(stream_response).decode() # the read failed; retry once, and give up if it fails again
if len(chunk)==0: continue
if not chunk.startswith('data:'):
error_msg = get_full_error(chunk.encode('utf8'), stream_response).decode()
# blacklist check: record api keys that the endpoint has rejected
current_api_key = headers['Authorization'].split(' ')[-1]
print("current_api_key:", current_api_key)
add_black_list(error_msg, current_api_key)
# at this point a new api_key needs to be re-sampled
new_api_key = select_api_key(llm_kwargs['api_key'], llm_kwargs['llm_model'])
headers = {
"Content-Type": "application/json",
"Authorization": f"Bearer {new_api_key}"
}
if "reduce the length" in error_msg:
raise ConnectionAbortedError("OpenAI拒绝了请求:" + error_msg)
else:
raise RuntimeError("OpenAI拒绝了请求:" + error_msg)
if ('data: [DONE]' in chunk): break # api2d 正常完成
json_data = json.loads(chunk.lstrip('data:'))['choices'][0]
delta = json_data["delta"]
if len(delta) == 0: break
if "role" in delta: continue
if "content" in delta:
result += delta["content"]
if not console_slience: print(delta["content"], end='')
if observe_window is not None:
# 观测窗,把已经获取的数据显示出去
if len(observe_window) >= 1: observe_window[0] += delta["content"]
# 看门狗,如果超过期限没有喂狗,则终止
if len(observe_window) >= 2:
if (time.time()-observe_window[1]) > watch_dog_patience:
raise RuntimeError("用户取消了程序。")
else: raise RuntimeError("意外Json结构:"+delta)
if json_data['finish_reason'] == 'content_filter':
raise RuntimeError("由于提问含不合规内容被Azure过滤。")
if json_data['finish_reason'] == 'length':
raise ConnectionAbortedError("正常结束,但显示Token不足,导致输出不完整,请削减单次输入的文本量。")
return result
def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_prompt='', stream = True, additional_fn=None):
"""
Send the request to chatGPT and fetch the output as a stream.
Used for the basic conversation feature.
inputs is the input of this query
top_p, temperature are internal tuning parameters of chatGPT
history is the list of previous dialogue turns (note: if either inputs or history is too long, a token-overflow error will be triggered)
chatbot is the conversation list shown in the WebUI; modify it and yield to update the dialogue shown on screen
additional_fn indicates which button was clicked; see functional.py for the buttons
"""
if is_any_api_key(inputs):
# 如果输入的刚好是api_key,那么就直接在这个对话里面,使用这个api_key
chatbot._cookies['api_key'] = inputs
chatbot.append(("输入已识别为openai的api_key", what_keys(inputs)))
yield from update_ui(chatbot=chatbot, history=history, msg="api_key已导入") # 刷新界面
return
elif not is_any_api_key(chatbot._cookies['api_key']):
chatbot.append((inputs, "缺少api_key。\n\n1. 临时解决方案:直接在输入区键入api_key,然后回车提交。\n\n2. 长效解决方案:在config.py中配置。"))
yield from update_ui(chatbot=chatbot, history=history, msg="缺少api_key") # 刷新界面
return
if additional_fn is not None:
from core_functional import handle_core_functionality
inputs, history = handle_core_functionality(additional_fn, inputs, history, chatbot)
raw_input = inputs
logging.info(f'[raw_input] {raw_input}')
chatbot.append((inputs, ""))
yield from update_ui(chatbot=chatbot, history=history, msg="等待响应") # 刷新界面
# check mis-behavior
if raw_input.startswith('private_upload/') and len(raw_input) == 34:
chatbot[-1] = (inputs, f"[Local Message] 检测到操作错误!当您上传文档之后,需要点击“函数插件区”按钮进行处理,而不是点击“提交”按钮。")
yield from update_ui(chatbot=chatbot, history=history, msg="正常") # 刷新界面
time.sleep(2)
try:
headers, payload = generate_payload(inputs, llm_kwargs, history, system_prompt, stream)
except RuntimeError as e:
chatbot[-1] = (inputs, f"您提供的api-key不满足要求,不包含任何可用于{llm_kwargs['llm_model']}的api-key。您可能选择了错误的模型或请求源。")
yield from update_ui(chatbot=chatbot, history=history, msg="api-key不满足要求") # 刷新界面
return
except AssertionError as e:
chatbot[-1] = (inputs, e.args[0])
yield from update_ui(chatbot=chatbot, history=history, msg="敏感词检测") # 刷新界面
return
history.append(inputs); history.append("")
retry = 0
while True:
try:
# make a POST request to the API endpoint, stream=True
from .bridge_all import model_info
endpoint = model_info[llm_kwargs['llm_model']]['endpoint']
response = requests.post(endpoint, headers=headers, proxies=proxies,
json=payload, stream=True, timeout=TIMEOUT_SECONDS);break
except:
retry += 1
chatbot[-1] = (chatbot[-1][0], timeout_bot_msg)
retry_msg = f",正在重试 ({retry}/{MAX_RETRY}) ……" if MAX_RETRY > 0 else ""
yield from update_ui(chatbot=chatbot, history=history, msg="请求超时"+retry_msg) # 刷新界面
if retry > MAX_RETRY: raise TimeoutError
gpt_replying_buffer = ""
is_head_of_the_stream = True
if stream:
stream_response = response.iter_lines()
while True:
try:
chunk = next(stream_response)
except StopIteration:
# Non-official OpenAI-compatible endpoints can raise this; official OpenAI and API2D never reach this branch
chunk_decoded = chunk.decode()
error_msg = chunk_decoded
# first rule out a third-party one-api bug where no [DONE] packet is sent
if len(gpt_replying_buffer.strip()) > 0 and len(error_msg) == 0:
yield from update_ui(chatbot=chatbot, history=history, msg="检测到有缺陷的非OpenAI官方接口,建议选择更稳定的接口。")
break
# otherwise, report the error and return
chatbot, history = handle_error(inputs, llm_kwargs, chatbot, history, chunk_decoded, error_msg)
yield from update_ui(chatbot=chatbot, history=history, msg="非OpenAI官方接口返回了错误:" + chunk.decode()) # refresh the UI
return
# peek at a few fields of the chunk up front (chunkjson / has_content / has_role are used below);
# this call previously sat unreachably inside the except branch, which left those names undefined in the normal path
chunk_decoded, chunkjson, has_choices, has_content, has_role = decode_chunk(chunk)
if is_head_of_the_stream and (r'"object":"error"' not in chunk_decoded) and (r"content" not in chunk_decoded):
# 数据流的第一帧不携带content
is_head_of_the_stream = False; continue
if chunk:
try:
# the first condition is API2D's end-of-stream marker, the second is OpenAI's
#if ('data: [DONE]' in chunk_decoded) or (len(json.loads(chunk_decoded[6:])['choices'][0]["delta"]) == 0):
if ('data: [DONE]' in chunk_decoded) or (len(chunkjson['choices'][0]["delta"]) == 0):
# 判定为数据流的结束,gpt_replying_buffer也写完了
logging.info(f'[response] {gpt_replying_buffer}')
break
# process the body of the data stream
chunkjson = json.loads(chunk_decoded[6:])
status_text = f"finish_reason: {chunkjson['choices'][0].get('finish_reason', 'null')}"
# if an exception is thrown here, it is usually because the text is too long; see get_full_error's output for details
if has_content:
# normal case
gpt_replying_buffer = gpt_replying_buffer + chunkjson['choices'][0]["delta"]["content"]
elif has_role:
# some third-party endpoints send role-only deltas; tolerate them
continue
else:
# some poorly-behaved third-party endpoints do this
gpt_replying_buffer = gpt_replying_buffer + chunkjson['choices'][0]["delta"]["content"]
# gpt_replying_buffer = gpt_replying_buffer + json.loads(chunk_decoded[6:])['choices'][0]["delta"]["content"]
history[-1] = gpt_replying_buffer
chatbot[-1] = (history[-2], history[-1])
yield from update_ui(chatbot=chatbot, history=history, msg=status_text) # 刷新界面
except Exception as e:
yield from update_ui(chatbot=chatbot, history=history, msg="Json解析不合常规") # 刷新界面
chunk = get_full_error(chunk, stream_response)
chunk_decoded = chunk.decode()
error_msg = chunk_decoded
# 进入黑名单检测环节:
current_api_key = headers['Authorization'].split(' ')[-1]
print("current_api_key:", current_api_key)
add_black_list(error_msg, current_api_key)
# 这时候需要重新采样一个api_key
new_api_key = select_api_key(llm_kwargs['api_key'], llm_kwargs['llm_model'])
headers = {
"Content-Type": "application/json",
"Authorization": f"Bearer {new_api_key}"
}
chatbot, history = handle_error(inputs, llm_kwargs, chatbot, history, chunk_decoded, error_msg)
yield from update_ui(chatbot=chatbot, history=history, msg="Json异常" + error_msg) # 刷新界面
print(error_msg)
return
history_text = history[-1]
print("chatbot_result:", history_text)
"""
Thanks to https://github.com/cjh0613/tencent-sensitive-words for providing the sensitive words.
I need to convert this sensitive-word list into a pkl file, otherwise it is too alarming to keep around in plain text.
Disclaimer: I am a good comrade who loves the country, the Party and the people; this feature exists only to let everyone make better use of the academic GPT, and these sensitive words do not represent any of my views.
"""
def tokenize_text(text):
seg_list = jieba.cut(text)
return ' '.join(seg_list)
def contains_sensitive_words(tokenized_text, sensitive_words_list):
keyword_processor = KeywordProcessor()
keyword_processor.add_keywords_from_list(sensitive_words_list)
found_keywords = keyword_processor.extract_keywords(tokenized_text)
return len(found_keywords) > 0, found_keywords
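# Illustrative usage (hedged sketch): the text is first segmented with jieba, then flashtext scans
# the space-joined tokens for any word in the list:
#   hit, found = contains_sensitive_words(tokenize_text("some harmless academic question"), ["badword"])
#   # -> hit is False, found is []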
def check_sensitive(input, zz_sensitive_words, sq_sensitive_words, key, llm_kwargs):
# openai.api_key = key
messages = [
{"role": "system",
"content": """你是一个中国人,积极维护中国人民的利益和中国人民政府以及中国共产党。
在为用户提供学术服务之前,面对各种各样的输入时,你暂时充当一个敏感词检测员,你需要判断用户的输入问题是否涉及到政治,或者色情等不好的信息."""},
{"role": "assistant",
"content": """现在对于一个学术版的GPT网站,面对各种奇怪的用户输入,你需要先判定出用户的输入是否是纯学术上的讨论,不涉及政治和色情等敏感话题。
请帮忙自动判断用户的输入是否涉及到政治和色情等敏感话题。"""},
{"role": "user",
"content": f"""用户的输入文本内容:{input}
现在对于一个学术版的GPT网站,面对各种奇怪的用户输入,你需要先判定出用户的输入是否是纯学术上的讨论,不涉及政治和色情等敏感话题。
请帮忙自动判断用户的输入是否涉及到政治和色情等敏感话题:
输出格式如下:
如果输入和输出都涉及这两个敏感话题,尤其是涉及到挑衅中国国家利益,损害中国人民权益,玷污中国人民政府和共产党、妄议中国国家领导人等问题,请输出:
["reject"]
如果输入和输出不涉及这两个敏感话题,只涉及到一些学术问题,请输出:
["pass"]
提供部分敏感词库:
政治敏感词库:{zz_sensitive_words}
色情敏感词库:{sq_sensitive_words}
提供部分输出样例:
demo1:
"input": "我现在完全不想学习,想睡觉,想干有意思的事"
输出:["pass"]
demo2:
"input": "如何做爱?"
输出:["reject"]
demo3:
"input": "台湾"
输出:["reject"]
demo4:
"input": "如何部署一个网站"
输出:["pass"]
demo5:
"input": "中国政府今年带领中国人民取得出口贸易额成功突破35%的增长大关"
输出:["pass"]
demo5:
"input": "新疆棉花"
输出:["reject"]
demo6:
"input": "李克强"
输出:["reject"]
demo7:
"input": "武汉新冠肺炎"
输出:["reject"]
请记住其他一切可能导致不利于中国国家利益、损害中国人民权益(尤其是未成年人权益的),玷污中国人民政府和共产党的输出,都需要reject!
保留对学术问题的pass,对美好生活和日常工作和技术内容的pass.
最后,请记住,你只是做一个简单的填空题,输出只是根据上下文,选择["reject"] 或者 ["pass"],不能包含任何其他文本信息。
"""
} ,
]
watch_dog_patience = 15 # 看门狗的耐心, 设置5秒即可
headers = {
"Content-Type": "application/json",
"Authorization": f"Bearer {key}"
}
payload = {
"model": llm_kwargs['llm_model'].strip('api2d-'),
"messages": messages,
"temperature": 0.0, # 1.0,
"top_p": 0.05, # 1.0,
"n": 1,
"stream": True,
"presence_penalty": 0,
"frequency_penalty": 0,
}
retry = 0
observe_window=None
console_slience=False
while True:
try:
# make a POST request to the API endpoint, stream=True
from .bridge_all import model_info
endpoint = model_info[llm_kwargs['llm_model']]['endpoint']
response = requests.post(endpoint, headers=headers, proxies=proxies,
json=payload, stream=True, timeout=TIMEOUT_SECONDS); break
except requests.exceptions.ReadTimeout as e:
retry += 1
traceback.print_exc()
if retry > MAX_RETRY: raise TimeoutError
if MAX_RETRY!=0: print(f'请求超时,正在重试 ({retry}/{MAX_RETRY}) ……')
stream_response = response.iter_lines()
result = ''
while True:
try: chunk = next(stream_response).decode()
except StopIteration:
break
except requests.exceptions.ConnectionError:
chunk = next(stream_response).decode() # 失败了,重试一次?再失败就没办法了。
if len(chunk)==0: continue
if not chunk.startswith('data:'):
error_msg = get_full_error(chunk.encode('utf8'), stream_response).decode()
if "reduce the length" in error_msg:
raise ConnectionAbortedError("OpenAI拒绝了请求:" + error_msg)
else:
raise RuntimeError("OpenAI拒绝了请求:" + error_msg)
if ('data: [DONE]' in chunk): break # api2d 正常完成
json_data = json.loads(chunk.lstrip('data:'))['choices'][0]
delta = json_data["delta"]
if len(delta) == 0: break
if "role" in delta: continue
if "content" in delta:
result += delta["content"]
if not console_slience: print(delta["content"], end='')
if observe_window is not None:
# 观测窗,把已经获取的数据显示出去
if len(observe_window) >= 1: observe_window[0] += delta["content"]
# 看门狗,如果超过期限没有喂狗,则终止
if len(observe_window) >= 2:
if (time.time()-observe_window[1]) > watch_dog_patience:
raise RuntimeError("用户取消了程序。")
else: raise RuntimeError("意外Json结构:" + str(delta))
if json_data['finish_reason'] == 'length':
raise ConnectionAbortedError("正常结束,但显示Token不足,导致输出不完整,请削减单次输入的文本量。")
info = {}
print("sensitive_chat_result:", result)
# info['result'] = result
# info['token_used'] = response.usage.total_tokens
# decide pass/reject from the returned text
if "reject" in result:
info['pass'] = False
elif "pass" in result:
info['pass'] = True
else:
info['pass'] = False
return info
def handle_error(inputs, llm_kwargs, chatbot, history, chunk_decoded, error_msg):
from .bridge_all import model_info
openai_website = ' 请登录OpenAI查看详情 https://platform.openai.com/signup'
if "reduce the length" in error_msg:
if len(history) >= 2: history[-1] = ""; history[-2] = "" # clear the overflowing turn: history[-2] is the current input, history[-1] is the current output
history = clip_history(inputs=inputs, history=history, tokenizer=model_info[llm_kwargs['llm_model']]['tokenizer'],
max_token_limit=(model_info[llm_kwargs['llm_model']]['max_token'])) # release at least half of the history
chatbot[-1] = (chatbot[-1][0], "[Local Message] Reduce the length. 本次输入过长, 或历史数据过长. 历史缓存数据已部分释放, 您可以请再次尝试. (若再次失败则更可能是因为输入过长.)")
elif "does not exist" in error_msg:
chatbot[-1] = (chatbot[-1][0], f"[Local Message] Model {llm_kwargs['llm_model']} does not exist. 模型不存在, 或者您没有获得体验资格.")
elif "Incorrect API key" in error_msg:
chatbot[-1] = (chatbot[-1][0], "[Local Message] Incorrect API key. OpenAI以提供了不正确的API_KEY为由, 拒绝服务. " + openai_website)
elif "exceeded your current quota" in error_msg:
chatbot[-1] = (chatbot[-1][0], "[Local Message] You exceeded your current quota. OpenAI以账户额度不足为由, 拒绝服务." + openai_website)
elif "account is not active" in error_msg:
chatbot[-1] = (chatbot[-1][0], "[Local Message] Your account is not active. OpenAI以账户失效为由, 拒绝服务." + openai_website)
elif "associated with a deactivated account" in error_msg:
chatbot[-1] = (chatbot[-1][0], "[Local Message] You are associated with a deactivated account. OpenAI以账户失效为由, 拒绝服务." + openai_website)
elif "bad forward key" in error_msg:
chatbot[-1] = (chatbot[-1][0], "[Local Message] Bad forward key. API2D账户额度不足.")
elif "Not enough point" in error_msg:
chatbot[-1] = (chatbot[-1][0], "[Local Message] Not enough point. API2D账户点数不足.")
else:
from toolbox import regular_txt_to_markdown
tb_str = '```\n' + trimmed_format_exc() + '```'
chatbot[-1] = (chatbot[-1][0], f"[Local Message] 异常 \n\n{tb_str} \n\n{regular_txt_to_markdown(chunk_decoded)}")
return chatbot, history
def add_black_list(result, api_key):
print("result:", result)
result = str(result)
if "please check your plan" in result or "deactivated" in result or "account associated" in result or "Incorrect API key" in result:
# read the existing blacklist first; create the file if it does not exist:
cur_black_list = []
try:
with open('black_apis.txt', 'r') as f:
for line in f.readlines():
cur_black_list.append(line.strip())
except FileNotFoundError:
with open('black_apis.txt', 'w') as f:
f.write("")
if api_key not in cur_black_list:
print("add black list:", api_key)
with open('black_apis.txt', 'a+') as f:
f.write(api_key + '\n')
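# Minimal sketch of add_black_list: only result strings carrying one of the listed
# API error phrases are appended to black_apis.txt. The key below is a placeholder.
def _demo_black_list():
    add_black_list("Incorrect API key provided", "sk-placeholder")  # recorded in black_apis.txt
    add_black_list("an ordinary model reply", "sk-placeholder")     # ignored, no error phrase present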
def generate_payload(inputs, llm_kwargs, history, system_prompt, stream):
"""
Gather all the information, select the LLM model, and build the HTTP headers and payload for the request.
"""
if not is_any_api_key(llm_kwargs['api_key']):
raise AssertionError("你提供了错误的API_KEY。\n\n1. 临时解决方案:直接在输入区键入api_key,然后回车提交。\n\n2. 长效解决方案:在config.py中配置。")
api_key = select_api_key(llm_kwargs['api_key'], llm_kwargs['llm_model'])
# first check whether the input hits the sensitive-word filter:
# load the sensitive-word lists,
# restored from the pickled files
with open('sensitive_words.pkl', 'rb') as f:
sensitive_words = pickle.load(f)
with open('zz_sensitive_words.pkl', 'rb') as f:
zz_sensitive_words = pickle.load(f)
with open('sq_sensitive_words.pkl', 'rb') as f:
sq_sensitive_words = pickle.load(f)
tokenized_text = tokenize_text(inputs)
result, found_keywords = contains_sensitive_words(tokenized_text, sensitive_words)
if result:
print("包含敏感词:", found_keywords)
print("奇怪的分词:", tokenized_text)
# openai.proxy = proxies['https']
check_result = check_sensitive(inputs, zz_sensitive_words, sq_sensitive_words, key=api_key, llm_kwargs=llm_kwargs)['pass']
add_black_list(check_result, api_key)
if bool(check_result):
pass_flag = True
else:
pass_flag = False
else:
pass_flag = True
# if the input is flagged, refuse it with the preset reply
if pass_flag == False:
from toolbox import black_list # reject and blacklist the client IP
from toolbox import black_num_list # per-IP reject counts
if llm_kwargs['client_ip'] not in black_list:
black_num_list.append(1)
else:
now_ip_index = black_list.index(llm_kwargs['client_ip'])
black_num_list[now_ip_index] += 1
if llm_kwargs['client_ip'] not in black_list:
black_list.append(llm_kwargs['client_ip']) # reject and blacklist the client IP
max_reject_num = 3
now_ip_index = black_list.index(llm_kwargs['client_ip'])
raise AssertionError("禁止输入敏感词汇,若再次尝试您的IP将被本站永久封禁!另外请不要因为好奇,测试这个系统的漏洞!如果有人故意攻击,我们后面会关闭这个功能,只保留arxiv论文翻译。请大家共同珍惜这个免费的学术工具,对于文科的一些敏感词,我们已经努力做了二次检测了,如果还有误杀的,请多包涵。还有{}次机会!".format(max_reject_num-black_num_list[now_ip_index]))
# if the input is not flagged, proceed with the normal request:
headers = {
"Content-Type": "application/json",
"Authorization": f"Bearer {api_key}"
}
if API_ORG.startswith('org-'): headers.update({"OpenAI-Organization": API_ORG})
if llm_kwargs['llm_model'].startswith('azure-'): headers.update({"api-key": api_key})
conversation_cnt = len(history) // 2
messages = [{"role": "system", "content": system_prompt}]
if conversation_cnt:
for index in range(0, 2*conversation_cnt, 2):
what_i_have_asked = {}
what_i_have_asked["role"] = "user"
what_i_have_asked["content"] = history[index]
what_gpt_answer = {}
what_gpt_answer["role"] = "assistant"
what_gpt_answer["content"] = history[index+1]
if what_i_have_asked["content"] != "":
if what_gpt_answer["content"] == "": continue
if what_gpt_answer["content"] == timeout_bot_msg: continue
messages.append(what_i_have_asked)
messages.append(what_gpt_answer)
else:
messages[-1]['content'] = what_gpt_answer['content']
what_i_ask_now = {}
what_i_ask_now["role"] = "user"
what_i_ask_now["content"] = inputs
messages.append(what_i_ask_now)
model = llm_kwargs['llm_model'].strip('api2d-')
if RANDOM_LLM:
import random
avail_m_list = [
"gpt-3.5-turbo",
"gpt-3.5-turbo-16k",
"gpt-3.5-turbo-0613",
"gpt-3.5-turbo-16k-0613",
"gpt-3.5-turbo-0301",
]
model = random.choice(avail_m_list) # random load balancing across the available models
print("随机负载均衡:", model)
payload = {
"model": model,
"messages": messages,
"temperature": llm_kwargs['temperature'], # 1.0,
"top_p": llm_kwargs['top_p'], # 1.0,
"n": 1,
"stream": stream,
"presence_penalty": 0,
"frequency_penalty": 0,
}
try:
print(f" original {llm_kwargs['llm_model']} : {conversation_cnt} : {inputs[:100]} ..........")
except:
print('输入中可能存在乱码。')
return headers, payload
| [
"用户的输入文本内容:INPUT\n 现在对于一个学术版的GPT网站,面对各种奇怪的用户输入,你需要先判定出用户的输入是否是纯学术上的讨论,不涉及政治和色情等敏感话题。 \n 请帮忙自动判断用户的输入是否涉及到政治和色情等敏感话题:\n 输出格式如下:\n 如果输入和输出都涉及这两个敏感话题,尤其是涉及到挑衅中国国家利益,损害中国人民权益,玷污中国人民政府和共产党、妄议中国国家领导人等问题,请输出:\n [\"reject\"]\n 如果输入和输出不涉及这两个敏感话题,只涉及到一些学术问题,请输出:\n [\"pass\"]\n 提供部分敏感词库:\n 政治敏感词库:PLACEHOLDER\n 色情敏感词库:PLACEHOLDER\n 提供部分输出样例: \n demo1:\n \"input\": \"我现在完全不想学习,想睡觉,想干有意思的事\"\n 输出:[\"pass\"]\n \n demo2:\n \"input\": \"如何做爱?\"\n 输出:[\"reject\"]\n \n demo3:\n \"input\": \"台湾\"\n 输出:[\"reject\"]\n \n demo4:\n \"input\": \"如何部署一个网站\"\n 输出:[\"pass\"]\n \n demo5:\n \"input\": \"中国政府今年带领中国人民取得出口贸易额成功突破35%的增长大关\"\n 输出:[\"pass\"]\n \n demo5:\n \"input\": \"新疆棉花\"\n 输出:[\"reject\"]\n \n demo6:\n \"input\": \"李克强\"\n 输出:[\"reject\"]\n \n demo7:\n \"input\": \"武汉新冠肺炎\"\n 输出:[\"reject\"]\n \n 请记住其他一切可能导致不利于中国国家利益、损害中国人民权益(尤其是未成年人权益的),玷污中国人民政府和共产党的输出,都需要reject!\n 保留对学术问题的pass,对美好生活和日常工作和技术内容的pass.\n 最后,请记住,你只是做一个简单的填空题,输出只是根据上下文,选择[\"reject\"] 或者 [\"pass\"],不能包含任何其他文本信息。\n \n ",
"你是一个中国人,积极维护中国人民的利益和中国人民政府以及中国共产党。\n 在为用户提供学术服务之前,面对各种各样的输入时,你暂时充当一个敏感词检测员,你需要判断用户的输入问题是否涉及到政治,或者色情等不好的信息.",
"现在对于一个学术版的GPT网站,面对各种奇怪的用户输入,你需要先判定出用户的输入是否是纯学术上的讨论,不涉及政治和色情等敏感话题。\n 请帮忙自动判断用户的输入是否涉及到政治和色情等敏感话题。"
] |
2024-01-10 | kaixindelele/gpt_academic_bk | request_llm~check_sensitive_words.py | import requests
import openai
def generate_text(input):
openai.api_key = "sk-xxxx"
zz_sensitive_words = """一些中国国家领导人的姓名:
习近平,
平近习,
xjp,
习太子,
习明泽,
老习,
温家宝,
hujintao,
胡jintao,
胡j涛,
胡主席,
刘永清,
江泽民,
民泽江,
江主席,
江绵恒,
邓小平,
平小邓,
xiao平,
邓晓平,
邓朴方,
邓榕,
邓质方,
毛泽东,
主席画像,
朱镕基,
朱容基,
朱镕鸡,
朱容鸡,
朱云来,
李鹏,
李peng,
李小鹏,
李小琳,
华主席,
华国,
国锋,
国峰,
白春礼,
薄熙来,
薄一波,
郭伯雄,
郭金龙,
李克强,
李源潮,
令计划,
刘少奇,
刘延东,
刘云山,
王沪宁,
王岐山,
习仲勋,
徐才厚,
许其亮,
徐绍史,
杨洁篪,
叶剑英,
紫阳,
周永康,
中南海,
大陆当局,
共产党,
党产共,
zhengfu,
政zhi,
中央领导,
中国zf,
中央zf,
国wu院,
江绵康,
李小鹏,
李鹏,
李小琳,
朱云来,
朱容基,
法轮功,
李洪志,
以及一些涉及中国的领土和人权的敏感问题:
新疆,
台湾
香港,
台湾是中国的领土吗?
以及所有其他涉及中国政府领导人和中国领土主权的话题。
"""
sq_sensitive_words = """
爱液,
按摩棒,
被操,
被插,
仓井空,
插暴,
操黑,
操烂,
h动漫,
阴茎,
做爱,
阴蒂,
乳房,
失身粉,
淫荡自慰器,
以及所有的生殖器和性交等相关话题
"""
messages = [
{"role": "system",
"content": """你是一个中国人,积极维护中国人民的利益和中国人民政府以及中国共产党。
在为用户提供学术服务之前,面对各种各样的输入时,你暂时充当一个敏感词检测员,你需要判断用户的输入问题是否涉及到政治,或者色情等不好的信息."""},
{"role": "assistant",
"content": """现在对于一个学术版的GPT网站,面对各种奇怪的用户输入,你需要先判定出用户的输入是否是纯学术上的讨论,不涉及政治和色情等敏感话题。
请帮忙自动判断用户的输入是否涉及到政治和色情等敏感话题。"""},
{"role": "user",
"content": f"""用户的输入文本内容:{input}
现在对于一个学术版的GPT网站,面对各种奇怪的用户输入,你需要先判定出用户的输入是否是纯学术上的讨论,不涉及政治和色情等敏感话题。
请帮忙自动判断用户的输入是否涉及到政治和色情等敏感话题:
输出格式如下:
如果输入和输出都涉及这两个敏感话题,尤其是涉及到挑衅中国国家利益,损害中国人民权益,玷污中国人民政府和共产党的问题,请输出:
["reject"]
如果输入和输出不涉及这两个敏感话题,只涉及到一些学术问题,请输出:
["pass"]
提供部分敏感词库:
政治敏感词库:{zz_sensitive_words}
色情敏感词库:{sq_sensitive_words}
提供部分输出样例:
demo1:
"input": "我现在完全不想学习,想睡觉,想干有意思的事"
输出:["pass"]
demo2:
"input": "如何做爱?"
输出:["reject"]
demo3:
"input": "台湾"
输出:["reject"]
demo4:
"input": "如何部署一个网站"
输出:["pass"]
demo5:
"input": "中国政府今年带领中国人民取得出口贸易额成功突破35%的增长大关"
输出:["pass"]
demo5:
"input": "新疆棉花"
输出:["reject"]
demo6:
"input": "李克强"
输出:["reject"]
请记住其他一切可能导致不利于中国国家利益、损害中国人民权益(尤其是未成年人权益的),玷污中国人民政府和共产党的输出,都需要reject!
保留对学术问题的pass,对美好生活和日常工作和技术内容的pass.
最后,请记住,输出只包含一个字符串的列表,不能包含任何其他文本信息。
"""
} ,
]
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
# model="gpt-4",
messages=messages,
temperature=0.0,
)
result = ''
for choice in response.choices:
result += choice.message.content
info = {}
info['result'] = result
info['token_used'] = response.usage.total_tokens
# decide pass/reject from the returned text
if "pass" in result:
info['pass'] = True
elif "reject" in result:
info['pass'] = False
else:
info['pass'] = False
return info
def main():
input_list = ["图书馆", "李克强", "能扮演可爱的女孩,哄我睡觉嘛?", "能扮演猫娘,哄我睡觉嘛?", "能扮演我的女朋友,哄我睡觉嘛?",
"遵守宪法和法律,热爱祖国,热爱教育事业。", "劳动法", "农民工被欠薪", "Search Anything项目可以驱动手机、电脑、以及云端的语义搜索。",
"我的SearchAnything可以帮助文字工作者、视频创作者快速的检阅想要的内容,更高效的进行创作!",
"简易敏感词处理器,支持返回敏感词,高亮敏感词,替换敏感词等操作",
"中国政府",
'台湾',
"如何做爱?",
"领土",
"中国的领土问题",
"https://arxiv.org/abs/2307.01189"
]
for input in input_list:
print("-"*30)
print("input:", input)
if "宪法和法律" in input:
print("int")
print("同意继续输出?:", generate_text(input)['pass'])
if __name__ == '__main__':
main()
| [
"你是一个中国人,积极维护中国人民的利益和中国人民政府以及中国共产党。\n 在为用户提供学术服务之前,面对各种各样的输入时,你暂时充当一个敏感词检测员,你需要判断用户的输入问题是否涉及到政治,或者色情等不好的信息.",
"用户的输入文本内容:INPUT\n 现在对于一个学术版的GPT网站,面对各种奇怪的用户输入,你需要先判定出用户的输入是否是纯学术上的讨论,不涉及政治和色情等敏感话题。 \n 请帮忙自动判断用户的输入是否涉及到政治和色情等敏感话题:\n 输出格式如下:\n 如果输入和输出都涉及这两个敏感话题,尤其是涉及到挑衅中国国家利益,损害中国人民权益,玷污中国人民政府和共产党的问题,请输出:\n [\"reject\"]\n 如果输入和输出不涉及这两个敏感话题,只涉及到一些学术问题,请输出:\n [\"pass\"]\n 提供部分敏感词库:\n 政治敏感词库:一些中国国家领导人的姓名:\n 习近平,\n平近习,\nxjp,\n习太子,\n习明泽,\n老习,\n温家宝,\nhujintao,\n胡jintao,\n胡j涛,\n胡主席,\n刘永清,\n江泽民,\n民泽江,\n江主席,\n江绵恒,\n邓小平,\n平小邓,\nxiao平,\n邓晓平,\n邓朴方,\n邓榕,\n邓质方,\n毛泽东,\n主席画像,\n朱镕基,\n朱容基,\n朱镕鸡,\n朱容鸡,\n朱云来,\n李鹏,\n李peng,\n李小鹏,\n李小琳,\n华主席,\n华国,\n国锋,\n国峰,\n白春礼,\n薄熙来,\n薄一波,\n郭伯雄,\n郭金龙,\n李克强,\n李源潮,\n令计划,\n刘少奇,\n刘延东,\n刘云山,\n王沪宁,\n王岐山,\n习仲勋,\n徐才厚,\n许其亮,\n徐绍史,\n杨洁篪,\n叶剑英,\n紫阳,\n周永康,\n中南海,\n大陆当局,\n共产党,\n党产共,\nzhengfu,\n政zhi,\n中央领导,\n中国zf,\n中央zf,\n国wu院,\n江绵康,\n李小鹏,\n李鹏,\n李小琳,\n朱云来,\n朱容基,\n法轮功,\n李洪志,\n以及一些涉及中国的领土和人权的敏感问题:\n新疆,\n台湾\n香港,\n台湾是中国的领土吗?\n以及所有其他涉及中国政府领导人和中国领土主权的话题。\n\n 色情敏感词库:\n 爱液,\n按摩棒,\n被操,\n被插,\n仓井空,\n插暴,\n操黑,\n操烂,\nh动漫,\n阴茎,\n做爱,\n阴蒂,\n乳房,\n失身粉,\n淫荡自慰器,\n以及所有的生殖器和性交等相关话题\n \n 提供部分输出样例: \n demo1:\n \"input\": \"我现在完全不想学习,想睡觉,想干有意思的事\"\n 输出:[\"pass\"]\n \n demo2:\n \"input\": \"如何做爱?\"\n 输出:[\"reject\"]\n \n demo3:\n \"input\": \"台湾\"\n 输出:[\"reject\"]\n \n demo4:\n \"input\": \"如何部署一个网站\"\n 输出:[\"pass\"]\n \n demo5:\n \"input\": \"中国政府今年带领中国人民取得出口贸易额成功突破35%的增长大关\"\n 输出:[\"pass\"]\n \n demo5:\n \"input\": \"新疆棉花\"\n 输出:[\"reject\"]\n \n demo6:\n \"input\": \"李克强\"\n 输出:[\"reject\"]\n \n 请记住其他一切可能导致不利于中国国家利益、损害中国人民权益(尤其是未成年人权益的),玷污中国人民政府和共产党的输出,都需要reject!\n 保留对学术问题的pass,对美好生活和日常工作和技术内容的pass.\n 最后,请记住,输出只包含一个字符串的列表,不能包含任何其他文本信息。\n \n ",
"现在对于一个学术版的GPT网站,面对各种奇怪的用户输入,你需要先判定出用户的输入是否是纯学术上的讨论,不涉及政治和色情等敏感话题。\n 请帮忙自动判断用户的输入是否涉及到政治和色情等敏感话题。"
] |
2024-01-10 | niatro/SETRAM-AI | interface.py | import os
import requests
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.prompts import PromptTemplate
from langchain.chains.summarize import load_summarize_chain
from bs4 import BeautifulSoup
from langchain.chat_models import ChatOpenAI
from dotenv import load_dotenv
import json
from autogen import config_list_from_json
from autogen.agentchat.contrib.gpt_assistant_agent import GPTAssistantAgent
from autogen import UserProxyAgent
import autogen
import panel as pn
import asyncio
load_dotenv()
brwoserless_api_key = os.getenv("BROWSERLESS_API_KEY")
serper_api_key = os.getenv("SERP_API_KEY")
airtable_api_key = os.getenv("AIRTABLE_API_KEY")
config_list = config_list_from_json("OAI_CONFIG_LIST")
gpt4_config = {"config_list": config_list, "temperature":0, "seed": 53} # the seed makes the response deterministic
# ------------------ Change required in the autogen library ------------------
#C:\Users\Nicolas\anaconda3\envs\autogen_py310_env\Lib\site-packages\autogen\agentchat
# ------------------ Replace this line ------------------
#self.register_reply([Agent, None], ConversableAgent.a_check_termination_and_human_reply)
# ------------------ With this one ------------------
#self.register_reply([Agent, None], ConversableAgent.check_termination_and_human_reply)
# ------------------ Create the tool functions ------------------ #
# Search function
def google_search(search_keyword):
url = "https://google.serper.dev/search"
payload = json.dumps({
"q": search_keyword
})
headers = {
'X-API-KEY': serper_api_key,
'Content-Type': 'application/json'
}
response = requests.request("POST", url, headers=headers, data=payload)
print("RESPONSE:", response.text)
return response.text
# Summarization function
def resumen(objective, content):
llm = ChatOpenAI(temperature = 0, model = "gpt-3.5-turbo-16k-0613")
text_splitter = RecursiveCharacterTextSplitter(separators=["\n\n", "\n"], chunk_size = 10000, chunk_overlap=500)
docs = text_splitter.create_documents([content])
map_prompt = """
Write a summary of the following text for {objective}:
"{text}"
SUMMARY:
"""
map_prompt_template = PromptTemplate(template=map_prompt, input_variables=["text", "objective"])
summary_chain = load_summarize_chain(
llm=llm,
chain_type='map_reduce',
map_prompt = map_prompt_template,
combine_prompt = map_prompt_template,
verbose = False
)
output = summary_chain.run(input_documents=docs, objective=objective)
return output
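# Minimal usage sketch for resumen(), assuming OPENAI_API_KEY is set. The objective and
# page text are placeholders; the map_reduce chain above summarizes each chunk and then
# combines the partial summaries with the same prompt.
def _demo_resumen():
    page_text = "..."  # placeholder for scraped page content
    print(resumen("key facts about the company", page_text))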
# Web-scraping function
def web_scraping(objective: str, url: str):
    # scrape the website, and summarize the content against the objective if the content is too large
    # objective is the original objective and task the user gives the agent; url is the URL of the website to scrape
print("Scraping website...")
# Define the headers for the request
headers = {
'Cache-Control': 'no-cache',
'Content-Type': 'application/json',
}
# Define the data to be sent in the request
data = {
"url": url
}
# Convert Python object to JSON string
data_json = json.dumps(data)
# Send the POST request
response = requests.post(f"https://chrome.browserless.io/content?token={brwoserless_api_key}", headers=headers, data=data_json)
# Check the response status code
if response.status_code == 200:
soup = BeautifulSoup(response.content, "html.parser")
text = soup.get_text()
print("CONTENTTTTTT:", text)
if len(text) > 10000:
output = resumen(objective,text)
return output
else:
return text
else:
print(f"HTTP request failed with status code {response.status_code}")
# ------------------ Create the agents ------------------ #
input_future = None
initiate_chat_task_created = False
class MyConversableAgent(autogen.ConversableAgent):
async def a_get_human_input(self, prompt: str) -> str:
global input_future
print('AGET!!!!!!') # or however you wish to display the prompt
chat_interface.send(prompt, user="System", respond=False)
# Create a new Future object for this input operation if none exists
if input_future is None or input_future.done():
input_future = asyncio.Future()
# Wait for the callback to set a result on the future
await input_future
# Once the result is set, extract the value and reset the future for the next input operation
input_value = input_future.result()
input_future = None
return input_value
async def delayed_initiate_chat(agent, recipient, message):
global initiate_chat_task_created
# Indicate that the task has been created
initiate_chat_task_created = True
# Wait for 2 seconds
await asyncio.sleep(2)
# Now initiate the chat
await agent.a_initiate_chat(recipient, message=message)
user_proxy = MyConversableAgent(
name="Admin",
is_termination_msg=lambda x: x.get("content", "").rstrip().endswith("TERMINATE"),
system_message="""A human admin. Interact with the planner to discuss the plan. Plan execution needs to be approved by this admin.
""",
#Only say APPROVED in most cases, and say TERMINATE when nothing to be done further. Do not say others.
code_execution_config=False,
#default_auto_reply="Approved",
human_input_mode="ALWAYS",
#llm_config=gpt4_config,
)
# Create the researcher agent
researcher = GPTAssistantAgent(
name = "researcher",
llm_config = {
"config_list": config_list,
"assistant_id": "asst_aPJMdifV02oopBypJPxYgAKw"
}
)
# Register the functions the researcher will call
researcher.register_function(
function_map={
"google_search": google_search,
"web_scraping": web_scraping
}
)
# Create the research manager agent
research_manager = GPTAssistantAgent(
name="research_manager",
llm_config = {
"config_list": config_list,
"assistant_id": "asst_vzMkR7T4kiwwxbJ4wF7cE3XJ"
}
)
scientist = autogen.AssistantAgent(
name="Scientist",
human_input_mode="NEVER",
llm_config=gpt4_config,
system_message="""Scientist. You follow an approved plan. You are able to categorize papers after seeing their abstracts printed. You don't write code."""
)
planner = autogen.AssistantAgent(
name="Planner",
human_input_mode="NEVER",
system_message='''Planner. Suggest a plan. Revise the plan based on feedback from admin and critic, until admin approval.
Explain the plan first. Be clear which step is performed by an engineer, and which step is performed by a scientist.
''',
llm_config=gpt4_config,
)
critic = autogen.AssistantAgent(
name="Critic",
system_message="""Critic. Double check plan, claims, code from other agents and provide feedback.
Check whether the plan includes adding verifiable info such as source URL.
""",
llm_config=gpt4_config,
human_input_mode="NEVER",
)
groupchat = autogen.GroupChat(agents=[user_proxy, researcher, research_manager], messages=[], max_round=20)
manager = autogen.GroupChatManager(groupchat=groupchat, llm_config=gpt4_config)
avatar = {user_proxy.name:"👨💼", research_manager.name:"👩💻", scientist.name:"👩🔬", planner.name:"🗓", researcher.name:"🛠", critic.name:'📝'}
def print_messages(recipient, messages, sender, config):
#chat_interface.send(messages[-1]['content'], user=messages[-1]['name'], avatar=avatar[messages[-1]['name']], respond=False)
print(f"Messages from: {sender.name} sent to: {recipient.name} | num messages: {len(messages)} | message: {messages[-1]}")
if all(key in messages[-1] for key in ['name']):
chat_interface.send(messages[-1]['content'], user=messages[-1]['name'], avatar=avatar[messages[-1]['name']], respond=False)
else:
chat_interface.send(messages[-1]['content'], user='SecretGuy', avatar='🥷', respond=False)
return False, None # required to ensure the agent communication flow continues
user_proxy.register_reply(
[autogen.Agent, None],
reply_func=print_messages,
config={"callback": None},
)
researcher.register_reply(
[autogen.Agent, None],
reply_func=print_messages,
config={"callback": None},
)
scientist.register_reply(
[autogen.Agent, None],
reply_func=print_messages,
config={"callback": None},
)
planner.register_reply(
[autogen.Agent, None],
reply_func=print_messages,
config={"callback": None},
)
research_manager.register_reply(
[autogen.Agent, None],
reply_func=print_messages,
config={"callback": None},
)
critic.register_reply(
[autogen.Agent, None],
reply_func=print_messages,
config={"callback": None},
)
pn.extension(design="material")
initiate_chat_task_created = False
async def delayed_initiate_chat(agent, recipient, message):
global initiate_chat_task_created
# Indicate that the task has been created
initiate_chat_task_created = True
# Wait for 2 seconds
await asyncio.sleep(2)
# Now initiate the chat
await agent.a_initiate_chat(recipient, message=message)
async def callback(contents: str, user: str, instance: pn.chat.ChatInterface):
global initiate_chat_task_created
global input_future
if not initiate_chat_task_created:
asyncio.create_task(delayed_initiate_chat(user_proxy, manager, contents))
else:
if input_future and not input_future.done():
input_future.set_result(contents)
else:
print("Actualmente no hay ninguna respuesta en espera.")
chat_interface = pn.chat.ChatInterface(callback=callback)
chat_interface.send("Enviar un mensaje!", user="System", respond=False)
chat_interface.servable()
| [
"\n Write a summary of the following text for {objective}:\n \"{text}\"\n SUMMARY:\n "
] |
2024-01-10 | AI4Finance-Foundation/FinNLP | finnlp~large_language_models~openai~app4gpt_chat_agent.py | # https://www.app4gpt.com
# A replacement for openai's API in China
import openai
import numpy as np
class App4gpt_Chat_Agent:
def __init__(self,args):
assert "token" in args.keys()
openai.api_key = args["token"]
openai.api_base = "https://api.app4gpt.com/v1"
self.temperature = args["temperature"] if "temperature" in args.keys() else 1
self.top_p = args["top_p"] if "top_p" in args.keys() else 1
self.n = args["n"] if "n" in args.keys() else 1
self.max_tokens = args["max_tokens"] if "max_tokens" in args.keys() else None
self.presence_penalty = args["presence_penalty"] if "presence_penalty" in args.keys() else 0
self.frequency_penalty = args["frequency_penalty"] if "frequency_penalty" in args.keys() else 0
self.conversation_list = []
if "init_prompt" in args.keys():
self.conversation_list.append(
{"role":"system","content":args["init_prompt"]}
)
def get_single_response(self,prompt, model = "gpt-3.5-turbo"):
self.conversation_list.append({"role":"user","content":prompt})
response = openai.ChatCompletion.create(
model = model,
messages = self.conversation_list,
temperature = self.temperature,
top_p = self.top_p,
n = self.n,
max_tokens = self.max_tokens,
presence_penalty = self.presence_penalty,
frequency_penalty = self.frequency_penalty,
)
answer = response.choices[0].message['content']
self.conversation_list.append({"role":"assistant","content":answer})
return answer
def show_conversation(self):
conversation_list = self.conversation_list
for msg in conversation_list:
content = msg['content']
content = content.replace(".",".\n")
if msg['role'] == 'user':
print(f"\U0001F47B: {content}\n")
elif msg['role'] == 'system':
print(f"\U0001F4BB: {content}\n")
else:
print(f"\U0001F916: {content}\n")
def get_multiple_response(self,prompts):
pass
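# Minimal usage sketch for the class above, assuming a valid app4gpt token; every
# argument value here is a placeholder.
def _demo_agent():
    agent = App4gpt_Chat_Agent({
        "token": "sk-placeholder",
        "temperature": 0.7,
        "init_prompt": "You are a helpful financial assistant.",
    })
    print(agent.get_single_response("Summarize today's market in one sentence."))
    agent.show_conversation()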
| [
"init_prompt"
] |
2024-01-10 | YanJiaHuan/Text2Sql | multi_turn~Bard_GPT~V1~V1.py | import pandas as pd
import time
import re
import openai
import os
from os import environ
import sys
import tiktoken
import sqlite3
from Bard import Chatbot
'''
This is a GPT-Bard contrastive prompting method. In this demo we still focus on Spider,
but the difference is that the few-shots are generated by Bard.
Version 1:
1. The few-shots are generated by Bard.
2. Bard generates SQL/question samples based on the real input SQL/question.
3. GPT knows the few-shots are generated by Bard.
4. Bard knows it is a Spider task.
'''
#################### 0. Prompt ####################
SQL_generation_prompt = '''
You are an expert in SQL. I will give you a natural language question and a database schema,
please help me generate the corresponding SQL query with no further explaination.
'''
few_shot_generation_prompt_Bard = '''
You are an expert in SQL. I will give you a database schema in Spider dataset, and you need to generate three
SQL queries with natural language questions based on the schema.
'''
three_shots_SQL_generation_prompt = '''
Here is some examples of EASY, MEDIUM and HARD SQL queries.
SELECT count(*) FROM singer
SELECT avg(weight) , pettype FROM pets GROUP BY pettype
SELECT T1.fname , T1.age FROM student AS T1 JOIN has_pet AS T2 ON T1.stuid = T2.stuid JOIN pets AS T3 ON T3.petid = T2.petid WHERE T3.pettype = 'dog' AND T1.stuid NOT IN (SELECT T1.stuid FROM student AS T1 JOIN has_pet AS T2 ON T1.stuid = T2.stuid JOIN pets AS T3 ON T3.petid = T2.petid WHERE T3.pettype = 'cat')
'''
zero_shots_SQL_generation_prompt = '''
Sorry, I won't give you any examples. Please generate based on your own semantic parsing ability.
'''
three_shots_SQL_generation_prompt_from_Bard = '''
I will give you some expamples of how SQL is generated, please follow the instructions and generate your own answer(SQL).
'''
three_shot_Spider_prompt_without_explain = '''
Here is a sample of text2sql for you to understand the task.
Table advisor, columns = [*,s_ID,i_ID]
Table classroom, columns = [*,building,room_number,capacity]
Table course, columns = [*,course_id,title,dept_name,credits]
Table department, columns = [*,dept_name,building,budget]
Table instructor, columns = [*,ID,name,dept_name,salary]
Table prereq, columns = [*,course_id,prereq_id]
Table section, columns = [*,course_id,sec_id,semester,year,building,room_number,time_slot_id]
Table student, columns = [*,ID,name,dept_name,tot_cred]
Table takes, columns = [*,ID,course_id,sec_id,semester,year,grade]
Table teaches, columns = [*,ID,course_id,sec_id,semester,year]
Table time_slot, columns = [*,time_slot_id,day,start_hr,start_min,end_hr,end_min]
foreign key:[course.dept_name = department.dept_name,instructor.dept_name = department.dept_name,section.building = classroom.building,section.room_number = classroom.room_number,section.course_id = course.course_id,teaches.ID = instructor.ID,teaches.course_id = section.course_id,teaches.sec_id = section.sec_id,teaches.semester = section.semester,teaches.year = section.year,student.dept_name = department.dept_name,takes.ID = student.ID,takes.course_id = section.course_id,takes.sec_id = section.sec_id,takes.semester = section.semester,takes.year = section.year,advisor.s_ID = student.ID,advisor.i_ID = instructor.ID,prereq.prereq_id = course.course_id,prereq.course_id = course.course_id]
primary key:[classroom.building,department.dept_name,course.course_id,instructor.ID,section.course_id,teaches.ID,student.ID,takes.ID,advisor.s_ID,time_slot.time_slot_id,prereq.course_id]
example 1:
Question: Find out the average salary of professors?
SELECT avg ( salary ) FROM instructor
example 2:
Question: Find the average salary of the professors of each department?
SELECT avg ( salary ) , dept_name FROM instructor GROUP BY dept_name
example 3:
Question: Which department has the highest average salary of professors?
SELECT dept_name FROM instructor GROUP BY dept_name ORDER BY avg ( salary ) DESC LIMIT 1
'''
checker_prompt = '''
Please help me generate the corresponding SQL query with no further explaination.
'''
#################### 1. Set up ####################
#----------------------------------------------------------------------------------------------------------
# API_KEY = "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" # 自己的
API_KEY = "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" # 买的
# API_KEY = "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" # gpt4 孙哥
os.environ["OPENAI_API_KEY"] = API_KEY
openai.api_key = os.getenv("OPENAI_API_KEY")
#changed
task = 'Spider' # 'CoSQL' or 'Spider'
if task == 'CoSQL':
path_to_CoSQL = "./cosql_dataset"
DATASET_SCHEMA = path_to_CoSQL+"/tables.json"
DATASET = path_to_CoSQL+"/sql_state_tracking/cosql_dev.json"
OUTPUT_FILE_1 = "./predicted_sql.txt"
OUTPUT_FILE_2 = "./gold_sql.txt"
DATABASE_PATH = path_to_CoSQL+"/database"
else:
path_to_Spider = "/Users/yan/Desktop/text2sql/spider"
DATASET_SCHEMA = path_to_Spider + "/tables.json"
DATASET = path_to_Spider + "/dev.json"
OUTPUT_FILE_1 = "./Spider/predicted_sql.txt"
OUTPUT_FILE_2 = "./Spider/gold_sql.txt"
DATABASE_PATH = path_to_Spider + "/database"
# set max tokens limit
MAX_TOKENS = 4096
model_name = "gpt-3.5-turbo"
# model_name = "gpt-4"
encoding = tiktoken.encoding_for_model(model_name)
# count the token
def num_tokens_from_string(string: str, model_name: str) -> int:
"""Returns the number of tokens in a text string."""
encoding = tiktoken.encoding_for_model(model_name)
num_tokens = len(encoding.encode(string))
return num_tokens
# load dataset
def load_data(DATASET):
return pd.read_json(DATASET)
def find_foreign_keys_MYSQL_like(db_name):
df = spider_foreign[spider_foreign['Database name'] == db_name]
output = "["
for index, row in df.iterrows():
output += row['First Table Name'] + '.' + row['First Table Foreign Key'] + " = " + row['Second Table Name'] + '.' + row['Second Table Foreign Key'] + ','
output= output[:-1] + "]"
return output
def find_fields_MYSQL_like(db_name):
df = spider_schema[spider_schema['Database name'] == db_name]
df = df.groupby(' Table Name')
output = ""
for name, group in df:
output += "Table " +name+ ', columns = ['
for index, row in group.iterrows():
output += row[" Field Name"]+','
output = output[:-1]
output += "]\n"
return output
def find_primary_keys_MYSQL_like(db_name):
df = spider_primary[spider_primary['Database name'] == db_name]
output = "["
for index, row in df.iterrows():
output += row['Table Name'] + '.' + row['Primary Key'] +','
output = output[:-1]
output += "]\n"
return output
def creatiing_schema(DATASET_JSON):
schema_df = pd.read_json(DATASET_JSON)
schema_df = schema_df.drop(['column_names','table_names'], axis=1)
schema = []
f_keys = []
p_keys = []
for index, row in schema_df.iterrows():
tables = row['table_names_original']
col_names = row['column_names_original']
col_types = row['column_types']
foreign_keys = row['foreign_keys']
primary_keys = row['primary_keys']
for col, col_type in zip(col_names, col_types):
index, col_name = col
if index == -1:
for table in tables:
schema.append([row['db_id'], table, '*', 'text'])
else:
schema.append([row['db_id'], tables[index], col_name, col_type])
for primary_key in primary_keys:
index, column = col_names[primary_key]
p_keys.append([row['db_id'], tables[index], column])
for foreign_key in foreign_keys:
first, second = foreign_key
first_index, first_column = col_names[first]
second_index, second_column = col_names[second]
f_keys.append([row['db_id'], tables[first_index], tables[second_index], first_column, second_column])
spider_schema = pd.DataFrame(schema, columns=['Database name', ' Table Name', ' Field Name', ' Type'])
spider_primary = pd.DataFrame(p_keys, columns=['Database name', 'Table Name', 'Primary Key'])
spider_foreign = pd.DataFrame(f_keys,
columns=['Database name', 'First Table Name', 'Second Table Name', 'First Table Foreign Key',
'Second Table Foreign Key'])
return spider_schema,spider_primary,spider_foreign
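# Minimal sketch of how the three DataFrames built above feed the prompt helpers. It
# assumes DATASET_SCHEMA points at Spider's tables.json; "concert_singer" is one of the
# Spider development databases and is used only as an example id.
def _demo_schema_prompt():
    global spider_schema, spider_primary, spider_foreign
    spider_schema, spider_primary, spider_foreign = creatiing_schema(DATASET_SCHEMA)
    print(find_fields_MYSQL_like("concert_singer")
          + "foreign key:" + find_foreign_keys_MYSQL_like("concert_singer") + '\n'
          + "primary key:" + find_primary_keys_MYSQL_like("concert_singer"))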
def SQL_checker(sql, database):
# sql be like: "SELECT * FROM car_1 WHERE car_1.id = 1"
# database is the path to local xxx.sqlite
# the function of this part is to check if the sql is valid, if not, return the error message
path = DATABASE_PATH + '/' + database + '/' + database + '.sqlite'
try:
# Connect to the SQLite database
conn = sqlite3.connect(path)
# Create a cursor object to execute the SQL query
cursor = conn.cursor()
# Execute the SQL query
cursor.execute(sql)
# Commit the transaction and close the connection
conn.commit()
conn.close()
# Return a success message if the SQL query is valid
prompt = "The SQL query is valid in grammar."
checker = False
except sqlite3.Error as e:
# Return the error message if the SQL query is not valid
instruction = f"""#### the sql generated by you: {sql}, has error like :{e} , please fix the error and generate again. \n"""
fields = find_fields_MYSQL_like(database)
fields += "Foreign_keys = " + find_foreign_keys_MYSQL_like(database) + '\n'
fields += "Primary_keys = " + find_primary_keys_MYSQL_like(database)
prompt = instruction + fields + checker_prompt
checker = True
return prompt, checker
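# Minimal sketch of the checker: a grammatical query comes back with checker=False, while
# a broken one returns checker=True plus a repair prompt built from the schema helpers.
# "concert_singer" is an example database expected under DATABASE_PATH.
def _demo_sql_checker():
    prompt, needs_fix = SQL_checker("SELECT count(*) FROM singer", "concert_singer")
    print(needs_fix, prompt)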
import time
def GPT4_generation(prompt):
'''
openai.error.RateLimitError: Rate limit reached for default-gpt-3.5-turbo
in organization org-GFmlumrCZBB2Y40fVv7f8qgp on requests per min. Limit: 3 / min.
Please try again in 20s. Contact us through our help center at help.openai.com if you continue to have issues.
Please add a payment method to your account to increase your rate limit.
Visit https://platform.openai.com/account/billing to add a payment method.
'''
limit_marker = False
fake_SQL = "SELECT COUNT(*) FROM singer"
while True:
try:
response = openai.ChatCompletion.create(
model=model_name,
messages=[{"role": "user", "content": prompt}],
n = 1,
stream = False,
temperature=0.0,
max_tokens=600,
top_p = 1.0,
frequency_penalty=0.0,
presence_penalty=0.0,
)
return response['choices'][0]['message']['content'], limit_marker
except openai.error.RateLimitError as e:
print(f"RateLimitError: {e}")
print("Sleeping for 20 seconds...")
time.sleep(20)
print("Retrying...")
except Exception as e:
print(f"Unexpected error: {e}")
return fake_SQL, limit_marker
# helper to extract SQL from a fenced code block in the response
def extract_sql(response):
matches = re.findall(r'```sql\n(.*?)\n```', response, re.DOTALL)
return matches
tokens=(
"WwiJN0oLURBx7gX_O8WVz9Fufj1iefdzkpt2fsbsb-e8al2Kvufapnj5mYa6vGo5P1ub9w.",
"WwhXnsbFLxozhOKG1-NUO78iif9IiN5El3Qk9yk5fi70TMcaUMOwfWwjTyqAyNe6MCtiEA.",
"Wwi1wxVyz-X2piJk8Ts84d08Fm1UmHDTOS7ftlD6LCXdbUVjFrQlJfl97an8UHhZQM8juQ.",
"Wwj6xMcUvzQUaKwcRQ-qvwrIcZLDBRp9XP25HkEVBAJDVZBzujepzI_dttehdJiCAjCIMg.",
"WwjMZ_TL9xIl4jREPppT5df6tAsjLLgjRo_GKK5iLslGOh5lMtstOMP_iJEADXq6gjFEKA.",
"Wgj-oa5yHxfmjo0lLybtWGLiWYoKTZ07NXcUiaPiUHmtQQiAKlfzNTOA9lwqmCz2N0qGFg."
)
def Bard_generation(prompt):
limit_marker = False
token_index = 0
chatbot = Chatbot(tokens[token_index])
answer = chatbot.ask(prompt)
print('whole answer', answer)
while True: # This loop will continue until a string is returned
if isinstance(answer, dict): # check if answer is a dictionary (error response)
limit_marker = True
print("Token limit reached, switching to a new token...")
token_index += 1 # Move to the next token
if token_index >= len(tokens): # If we've used all tokens, start over
token_index = 0
print("exceeding total limit, Waiting 15 seconds...")
time.sleep(15) # freeze for 15s
chatbot = Chatbot(tokens[token_index]) # Create a new chatbot with the new token
answer = chatbot.ask(prompt) # resend the request
else:
return answer[0][0], limit_marker
def save_breaker(breaker):
with open("breaker.txt", "w") as f:
f.write(str(breaker))
# Function to load the breaker value from a file
def load_breaker():
if os.path.exists("breaker.txt"):
with open("breaker.txt", "r") as f:
breaker = int(f.read())
if breaker > 1037:
breaker = 0
else:
breaker = breaker
return breaker
return 0
if __name__ == '__main__':
###########################################################################################
# load the data
spider_schema,spider_primary,spider_foreign = creatiing_schema(DATASET_SCHEMA)
val_df = load_data(DATASET)
SQLs_temp_pred = []
SQLs_temp_gold = []
for index,sample in val_df.iterrows():
print('index:',index)
db_id = sample['db_id'] # e.g.'car_1'
question = sample['question'] # e.g.'How many car models are produced by each maker? List the count and the maker full name.'
SQL_gold = sample['query'] # e.g.'SELECT COUNT(*) FROM car_1 WHERE car_1.id = 1'
print('SQL_gold:',SQL_gold)
schema = find_fields_MYSQL_like(db_id) + '\n' + "foreign key:" + find_foreign_keys_MYSQL_like(
db_id) + '\n' + "primary key:" + find_primary_keys_MYSQL_like(db_id) #
###############################################
'''message to Bard, to get few-shots'''
message_Bard = few_shot_generation_prompt_Bard + \
"\ndatabase:" + db_id + \
"\ndatabase chema:" + schema
print('message to Bard:', message_Bard)
response_Bard, _ = Bard_generation(message_Bard)
print('response_Bard:', response_Bard)
###############################################
'''message to GPT, to get SQL'''
message_GPT = three_shots_SQL_generation_prompt_from_Bard + \
response_Bard + \
SQL_generation_prompt + \
"\ndatabase:" + db_id + \
"\ndatabase chema:" + schema + \
"Just give me the plain SQL without any placeholders." + \
"\nquestion:" + question+ \
"\nYour SQL:"
print('message to GPT3.5:', message_GPT)
SQL, limit_marker = GPT4_generation(message_GPT)
print('SQL:', SQL)
SQL = SQL.replace('\n', ' ')
print('\nGPT generated SQL:', SQL + '\n')
SQLs_temp_pred.append(SQL)
SQLs_temp_gold.append(SQL_gold+'\t'+db_id)
with open ('predicted_sql.txt', 'a') as f:
f.write(SQL+'\n')
with open ('gold_sql.txt', 'a') as f:
f.write(SQL_gold+'\t'+db_id+'\n')
# CUDA_VISIBLE_DEVICES=7 python read_cosql.py | [
"\nYou are an expert in SQL. I will give you a database schema in Spider dataset, and you need to generate three\nSQL queries with natural language questions based on the schema.\n",
"\nI will give you some expamples of how SQL is generated, please follow the instructions and generate your own answer(SQL).\n",
"PLACEHOLDERPLACEHOLDERPLACEHOLDER",
"\nPlease help me generate the corresponding SQL query with no further explaination.\n",
"\nSorry, I won't give you any examples. Please generate based on your own semantic parsing ability.\n",
"The SQL query is valid in grammar.",
"\nHere is a sample of text2sql for you to understand the task.\nTable advisor, columns = [*,s_ID,i_ID]\nTable classroom, columns = [*,building,room_number,capacity]\nTable course, columns = [*,course_id,title,dept_name,credits]\nTable department, columns = [*,dept_name,building,budget]\nTable instructor, columns = [*,ID,name,dept_name,salary]\nTable prereq, columns = [*,course_id,prereq_id]\nTable section, columns = [*,course_id,sec_id,semester,year,building,room_number,time_slot_id]\nTable student, columns = [*,ID,name,dept_name,tot_cred]\nTable takes, columns = [*,ID,course_id,sec_id,semester,year,grade]\nTable teaches, columns = [*,ID,course_id,sec_id,semester,year]\nTable time_slot, columns = [*,time_slot_id,day,start_hr,start_min,end_hr,end_min]\n\nforeign key:[course.dept_name = department.dept_name,instructor.dept_name = department.dept_name,section.building = classroom.building,section.room_number = classroom.room_number,section.course_id = course.course_id,teaches.ID = instructor.ID,teaches.course_id = section.course_id,teaches.sec_id = section.sec_id,teaches.semester = section.semester,teaches.year = section.year,student.dept_name = department.dept_name,takes.ID = student.ID,takes.course_id = section.course_id,takes.sec_id = section.sec_id,takes.semester = section.semester,takes.year = section.year,advisor.s_ID = student.ID,advisor.i_ID = instructor.ID,prereq.prereq_id = course.course_id,prereq.course_id = course.course_id]\nprimary key:[classroom.building,department.dept_name,course.course_id,instructor.ID,section.course_id,teaches.ID,student.ID,takes.ID,advisor.s_ID,time_slot.time_slot_id,prereq.course_id]\n\nexample 1:\nQuestion: Find out the average salary of professors?\nSELECT avg ( salary ) FROM instructor\n\nexample 2:\nQuestion: Find the average salary of the professors of each department?\nSELECT avg ( salary ) , dept_name FROM instructor GROUP BY dept_name\n\nexample 3:\nQuestion: Which department has the highest average salary of professors?\nSELECT dept_name FROM instructor GROUP BY dept_name ORDER BY avg ( salary ) DESC LIMIT 1\n",
"\nYou are an expert in SQL. I will give you a natural language question and a database schema, \nplease help me generate the corresponding SQL query with no further explaination.\n",
"\nHere is some examples of EASY, MEDIUM and HARD SQL queries.\nSELECT count(*) FROM singer \nSELECT avg(weight) , pettype FROM pets GROUP BY pettype\nSELECT T1.fname , T1.age FROM student AS T1 JOIN has_pet AS T2 ON T1.stuid = T2.stuid JOIN pets AS T3 ON T3.petid = T2.petid WHERE T3.pettype = 'dog' AND T1.stuid NOT IN (SELECT T1.stuid FROM student AS T1 JOIN has_pet AS T2 ON T1.stuid = T2.stuid JOIN pets AS T3 ON T3.petid = T2.petid WHERE T3.pettype = 'cat')\n"
] |
2024-01-10 | YanJiaHuan/Text2Sql | multi_turn~Bard_GPT~V0~V0.py | import pandas as pd
import time
import re
import openai
import os
from os import environ
import sys
import tiktoken
import sqlite3
from Bard import Chatbot
'''
This is a zero-shot prompting trial for GPT-3.5, which works as a baseline for our experiments.
Version 0:
1. GPT will be given a database schema and a natural language question, and it will generate the corresponding SQL query.
2. This is a pure zero-shot prompting trial, which means no samples are given to GPT.
'''
#################### 0. Prompt ####################
SQL_generation_prompt = '''
You are an expert in SQL. I will give you a natural language question and a database schema,
please help me generate the corresponding SQL query with no further explaination.
'''
checker_prompt = '''
Please help me generate the corresponding SQL query with no further explaination.
'''
#################### 1. Set up ####################
#----------------------------------------------------------------------------------------------------------
# API_KEY = "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" # 自己的
API_KEY = "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" # 买的
# API_KEY = "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" # gpt4 孙哥
os.environ["OPENAI_API_KEY"] = API_KEY
openai.api_key = os.getenv("OPENAI_API_KEY")
#changed
task = 'Spider' # 'CoSQL' or 'Spider'
if task == 'CoSQL':
path_to_CoSQL = "./cosql_dataset"
DATASET_SCHEMA = path_to_CoSQL+"/tables.json"
DATASET = path_to_CoSQL+"/sql_state_tracking/cosql_dev.json"
OUTPUT_FILE_1 = "./predicted_sql.txt"
OUTPUT_FILE_2 = "./gold_sql.txt"
DATABASE_PATH = path_to_CoSQL+"/database"
else:
path_to_Spider = "/Users/yan/Desktop/text2sql/spider"
DATASET_SCHEMA = path_to_Spider + "/tables.json"
DATASET = path_to_Spider + "/dev.json"
OUTPUT_FILE_1 = "./Spider/predicted_sql.txt"
OUTPUT_FILE_2 = "./Spider/gold_sql.txt"
DATABASE_PATH = path_to_Spider + "/database"
# set max tokens limit
MAX_TOKENS = 4096
model_name = "gpt-3.5-turbo"
# model_name = "gpt-4"
encoding = tiktoken.encoding_for_model(model_name)
# count the token
def num_tokens_from_string(string: str, model_name: str) -> int:
"""Returns the number of tokens in a text string."""
encoding = tiktoken.encoding_for_model(model_name)
num_tokens = len(encoding.encode(string))
return num_tokens
# load dataset
def load_data(DATASET):
return pd.read_json(DATASET)
def find_foreign_keys_MYSQL_like(db_name):
df = spider_foreign[spider_foreign['Database name'] == db_name]
output = "["
for index, row in df.iterrows():
output += row['First Table Name'] + '.' + row['First Table Foreign Key'] + " = " + row['Second Table Name'] + '.' + row['Second Table Foreign Key'] + ','
output= output[:-1] + "]"
return output
def find_fields_MYSQL_like(db_name):
df = spider_schema[spider_schema['Database name'] == db_name]
df = df.groupby(' Table Name')
output = ""
for name, group in df:
output += "Table " +name+ ', columns = ['
for index, row in group.iterrows():
output += row[" Field Name"]+','
output = output[:-1]
output += "]\n"
return output
def find_primary_keys_MYSQL_like(db_name):
df = spider_primary[spider_primary['Database name'] == db_name]
output = "["
for index, row in df.iterrows():
output += row['Table Name'] + '.' + row['Primary Key'] +','
output = output[:-1]
output += "]\n"
return output
def creatiing_schema(DATASET_JSON):
schema_df = pd.read_json(DATASET_JSON)
schema_df = schema_df.drop(['column_names','table_names'], axis=1)
schema = []
f_keys = []
p_keys = []
for index, row in schema_df.iterrows():
tables = row['table_names_original']
col_names = row['column_names_original']
col_types = row['column_types']
foreign_keys = row['foreign_keys']
primary_keys = row['primary_keys']
for col, col_type in zip(col_names, col_types):
index, col_name = col
if index == -1:
for table in tables:
schema.append([row['db_id'], table, '*', 'text'])
else:
schema.append([row['db_id'], tables[index], col_name, col_type])
for primary_key in primary_keys:
index, column = col_names[primary_key]
p_keys.append([row['db_id'], tables[index], column])
for foreign_key in foreign_keys:
first, second = foreign_key
first_index, first_column = col_names[first]
second_index, second_column = col_names[second]
f_keys.append([row['db_id'], tables[first_index], tables[second_index], first_column, second_column])
spider_schema = pd.DataFrame(schema, columns=['Database name', ' Table Name', ' Field Name', ' Type'])
spider_primary = pd.DataFrame(p_keys, columns=['Database name', 'Table Name', 'Primary Key'])
spider_foreign = pd.DataFrame(f_keys,
columns=['Database name', 'First Table Name', 'Second Table Name', 'First Table Foreign Key',
'Second Table Foreign Key'])
return spider_schema,spider_primary,spider_foreign
def SQL_checker(sql, database):
# sql be like: "SELECT * FROM car_1 WHERE car_1.id = 1"
# database is the path to local xxx.sqlite
# the function of this part is to check if the sql is valid, if not, return the error message
path = DATABASE_PATH + '/' + database + '/' + database + '.sqlite'
try:
# Connect to the SQLite database
conn = sqlite3.connect(path)
# Create a cursor object to execute the SQL query
cursor = conn.cursor()
# Execute the SQL query
cursor.execute(sql)
# Commit the transaction and close the connection
conn.commit()
conn.close()
# Return a success message if the SQL query is valid
prompt = "The SQL query is valid in grammar."
checker = False
except sqlite3.Error as e:
# Return the error message if the SQL query is not valid
instruction = f"""#### the sql generated by you: {sql}, has error like :{e} , please fix the error and generate again. \n"""
fields = find_fields_MYSQL_like(database)
fields += "Foreign_keys = " + find_foreign_keys_MYSQL_like(database) + '\n'
fields += "Primary_keys = " + find_primary_keys_MYSQL_like(database)
prompt = instruction + fields + checker_prompt
checker = True
return prompt, checker
import time
def GPT4_generation(prompt):
'''
openai.error.RateLimitError: Rate limit reached for default-gpt-3.5-turbo
in organization org-GFmlumrCZBB2Y40fVv7f8qgp on requests per min. Limit: 3 / min.
Please try again in 20s. Contact us through our help center at help.openai.com if you continue to have issues.
Please add a payment method to your account to increase your rate limit.
Visit https://platform.openai.com/account/billing to add a payment method.
'''
limit_marker = False
fake_SQL = "SELECT COUNT(*) FROM singer"
while True:
try:
response = openai.ChatCompletion.create(
model=model_name,
messages=[{"role": "user", "content": prompt}],
n = 1,
stream = False,
temperature=0.0,
max_tokens=600,
top_p = 1.0,
frequency_penalty=0.0,
presence_penalty=0.0,
)
return response['choices'][0]['message']['content'], limit_marker
except openai.error.RateLimitError as e:
print(f"RateLimitError: {e}")
print("Sleeping for 20 seconds...")
time.sleep(20)
print("Retrying...")
except Exception as e:
print(f"Unexpected error: {e}")
return fake_SQL, limit_marker
# helper to extract SQL from a fenced code block in the response
def extract_sql(response):
matches = re.findall(r'```sql\n(.*?)\n```', response, re.DOTALL)
return matches
tokens=(
"WwiJN0oLURBx7gX_O8WVz9Fufj1iefdzkpt2fsbsb-e8al2Kvufapnj5mYa6vGo5P1ub9w.",
"WwhXnsbFLxozhOKG1-NUO78iif9IiN5El3Qk9yk5fi70TMcaUMOwfWwjTyqAyNe6MCtiEA.",
"Wwi1wxVyz-X2piJk8Ts84d08Fm1UmHDTOS7ftlD6LCXdbUVjFrQlJfl97an8UHhZQM8juQ.",
"Wwj6xMcUvzQUaKwcRQ-qvwrIcZLDBRp9XP25HkEVBAJDVZBzujepzI_dttehdJiCAjCIMg.",
"WwjMZ_TL9xIl4jREPppT5df6tAsjLLgjRo_GKK5iLslGOh5lMtstOMP_iJEADXq6gjFEKA.",
"Wgj-oa5yHxfmjo0lLybtWGLiWYoKTZ07NXcUiaPiUHmtQQiAKlfzNTOA9lwqmCz2N0qGFg."
)
def Bard_generation(prompt):
limit_marker = False
token_index = 0
chatbot = Chatbot(tokens[token_index])
answer = chatbot.ask(prompt)
print('whole answer', answer)
while True: # This loop will continue until a string is returned
if isinstance(answer, dict): # check if answer is a dictionary (error response)
limit_marker = True
print("Token limit reached, switching to a new token...")
token_index += 1 # Move to the next token
if token_index >= len(tokens): # If we've used all tokens, start over
token_index = 0
print("exceeding total limit, Waiting 15 seconds...")
time.sleep(15) # freeze for 15s
chatbot = Chatbot(tokens[token_index]) # Create a new chatbot with the new token
answer = chatbot.ask(prompt) # resend the request
else:
return answer[0][0], limit_marker
def save_breaker(breaker):
with open("breaker.txt", "w") as f:
f.write(str(breaker))
# Function to load the breaker value from a file
def load_breaker():
if os.path.exists("breaker.txt"):
with open("breaker.txt", "r") as f:
breaker = int(f.read())
if breaker > 1037:
breaker = 0
else:
breaker = breaker
return breaker
return 0
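# Minimal sketch of the resume mechanism: the loop index is persisted to breaker.txt so an
# interrupted run can pick up where it left off; values past the dev-set size (1037) reset to 0.
def _demo_breaker_resume():
    save_breaker(42)
    print(load_breaker())  # -> 42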
if __name__ == '__main__':
###########################################################################################
# load the data
spider_schema,spider_primary,spider_foreign = creatiing_schema(DATASET_SCHEMA)
val_df = load_data(DATASET)
SQLs_temp_pred = []
SQLs_temp_gold = []
for index,sample in val_df.iterrows():
print('index:',index)
db_id = sample['db_id'] # e.g.'car_1'
question = sample['question'] # e.g.'How many car models are produced by each maker? List the count and the maker full name.'
SQL_gold = sample['query'] # e.g.'SELECT COUNT(*) FROM car_1 WHERE car_1.id = 1'
print('SQL_gold:',SQL_gold)
schema = find_fields_MYSQL_like(db_id) + '\n' + "foreign key:" + find_foreign_keys_MYSQL_like(
db_id) + '\n' + "primary key:" + find_primary_keys_MYSQL_like(db_id) #
###############################################
'''message to GPT, to get SQL'''
message_GPT = SQL_generation_prompt + \
"\ndatabase:" + db_id + \
"\ndatabase chema:" + schema + \
"Just give me the plain SQL without any placeholders." + \
"\nquestion:" + question+ \
"\nYour SQL:"
print('message to GPT3.5:', message_GPT)
SQL, limit_marker = GPT4_generation(message_GPT)
print('SQL:', SQL)
SQL = SQL.replace('\n', ' ')
print('\nGPT generated SQL:', SQL + '\n')
SQLs_temp_pred.append(SQL)
SQLs_temp_gold.append(SQL_gold+'\t'+db_id)
with open ('./predicted_sql.txt', 'a') as f:
f.write(SQL+'\n')
with open ('./gold_sql.txt', 'a') as f:
f.write(SQL_gold+'\t'+db_id+'\n')
# CUDA_VISIBLE_DEVICES=7 python read_cosql.py | [
"The SQL query is valid in grammar.",
"PLACEHOLDERPLACEHOLDERPLACEHOLDER",
"\nYou are an expert in SQL. I will give you a natural language question and a database schema, \nplease help me generate the corresponding SQL query with no further explaination.\n",
"\nPlease help me generate the corresponding SQL query with no further explaination.\n"
] |
2024-01-10 | YanJiaHuan/Text2Sql | multi_turn~read_Cosql_Bard.py | import pandas as pd
import time
import re
import openai
import os
from os import environ
import sys
import tiktoken
import sqlite3
from Bard import Chatbot
#################### 0. Prompt ####################
SQL_generation_prompt = '''
You are an expert in SQL. I will give you a natural language question and a database schema,
please help me generate the corresponding SQL query with no further explaination.
'''
SQL_generation_prompt_Bard = '''
Help me convert natural language questions to SQL, don't explain the functionality of the SQL query.
'''
three_shots_SQL_generation_prompt = '''
Here is some examples of EASY, MEDIUM and HARD SQL queries.
SELECT count(*) FROM singer
SELECT avg(weight) , pettype FROM pets GROUP BY pettype
SELECT T1.fname , T1.age FROM student AS T1 JOIN has_pet AS T2 ON T1.stuid = T2.stuid JOIN pets AS T3 ON T3.petid = T2.petid WHERE T3.pettype = 'dog' AND T1.stuid NOT IN (SELECT T1.stuid FROM student AS T1 JOIN has_pet AS T2 ON T1.stuid = T2.stuid JOIN pets AS T3 ON T3.petid = T2.petid WHERE T3.pettype = 'cat')
'''
zero_shots_SQL_generation_prompt = '''
Sorry, I won't give you any examples. Please generate based on your own semantic parsing ability.
'''
one_shot_Cosql_prompt_without_explain = '''
Here is a sample of multi-turn text2sql for you to understand the task.
Table advisor, columns = [*,s_ID,i_ID]
Table classroom, columns = [*,building,room_number,capacity]
Table course, columns = [*,course_id,title,dept_name,credits]
Table department, columns = [*,dept_name,building,budget]
Table instructor, columns = [*,ID,name,dept_name,salary]
Table prereq, columns = [*,course_id,prereq_id]
Table section, columns = [*,course_id,sec_id,semester,year,building,room_number,time_slot_id]
Table student, columns = [*,ID,name,dept_name,tot_cred]
Table takes, columns = [*,ID,course_id,sec_id,semester,year,grade]
Table teaches, columns = [*,ID,course_id,sec_id,semester,year]
Table time_slot, columns = [*,time_slot_id,day,start_hr,start_min,end_hr,end_min]
foreign key:[course.dept_name = department.dept_name,instructor.dept_name = department.dept_name,section.building = classroom.building,section.room_number = classroom.room_number,section.course_id = course.course_id,teaches.ID = instructor.ID,teaches.course_id = section.course_id,teaches.sec_id = section.sec_id,teaches.semester = section.semester,teaches.year = section.year,student.dept_name = department.dept_name,takes.ID = student.ID,takes.course_id = section.course_id,takes.sec_id = section.sec_id,takes.semester = section.semester,takes.year = section.year,advisor.s_ID = student.ID,advisor.i_ID = instructor.ID,prereq.prereq_id = course.course_id,prereq.course_id = course.course_id]
primary key:[classroom.building,department.dept_name,course.course_id,instructor.ID,section.course_id,teaches.ID,student.ID,takes.ID,advisor.s_ID,time_slot.time_slot_id,prereq.course_id]
Iteration 1:
Question: Find out the average salary of professors?
SELECT avg ( salary ) FROM instructor
Iteration 2: # iteration 2 will see the question and sql in iteration 1
Question: Find the average salary of the professors of each department?
SELECT avg ( salary ) , dept_name FROM instructor GROUP BY dept_name
Iteration 3: # iteration 3 will see the questiones and sqls in iteration 2 and 1
Question: Which department has the highest average salary of professors?
SELECT dept_name FROM instructor GROUP BY dept_name ORDER BY avg ( salary ) DESC LIMIT 1
'''
checker_prompt = '''
Please help me generate the corresponding SQL query with no further explaination.
'''
Contextual_prompt = '''
Now I will give you some context (question and your own answer). Please generate the corresponding SQL query with no further explaination.
'''
#################### 1. Set up ####################
#----------------------------------------------------------------------------------------------------------
# API_KEY = "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" # 自己的
API_KEY = "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" # 买的
# API_KEY = "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" # gpt4 孙哥
os.environ["OPENAI_API_KEY"] = API_KEY
openai.api_key = os.getenv("OPENAI_API_KEY")
#changed
task = 'CoSQL' # 'CoSQL' or 'Spider'
if task == 'CoSQL':
path_to_CoSQL = "./cosql_dataset"
DATASET_SCHEMA = path_to_CoSQL+"/tables.json"
DATASET = path_to_CoSQL+"/sql_state_tracking/cosql_dev.json"
OUTPUT_FILE_1 = "./predicted_sql.txt"
OUTPUT_FILE_2 = "./gold_sql.txt"
DATABASE_PATH = path_to_CoSQL+"/database"
else:
path_to_Spider = "/Users/yan/Desktop/text2sql/spider"
DATASET_SCHEMA = path_to_Spider + "/tables.json"
DATASET = path_to_Spider + "/dev.json"
OUTPUT_FILE_1 = "./Spider/predicted_sql.txt"
OUTPUT_FILE_2 = "./Spider/gold_sql.txt"
DATABASE_PATH = path_to_Spider + "/database"
# set max tokens limit
MAX_TOKENS = 4096
model_name = "gpt-3.5-turbo"
# model_name = "gpt-4"
encoding = tiktoken.encoding_for_model(model_name)
# count the token
def num_tokens_from_string(string: str, model_name: str) -> int:
"""Returns the number of tokens in a text string."""
encoding = tiktoken.encoding_for_model(model_name)
num_tokens = len(encoding.encode(string))
return num_tokens
# load dataset
def load_data(DATASET):
return pd.read_json(DATASET)
def find_foreign_keys_MYSQL_like(db_name):
df = spider_foreign[spider_foreign['Database name'] == db_name]
output = "["
for index, row in df.iterrows():
output += row['First Table Name'] + '.' + row['First Table Foreign Key'] + " = " + row['Second Table Name'] + '.' + row['Second Table Foreign Key'] + ','
output= output[:-1] + "]"
return output
def find_fields_MYSQL_like(db_name):
df = spider_schema[spider_schema['Database name'] == db_name]
df = df.groupby(' Table Name')
output = ""
for name, group in df:
output += "Table " +name+ ', columns = ['
for index, row in group.iterrows():
output += row[" Field Name"]+','
output = output[:-1]
output += "]\n"
return output
def find_primary_keys_MYSQL_like(db_name):
df = spider_primary[spider_primary['Database name'] == db_name]
output = "["
for index, row in df.iterrows():
output += row['Table Name'] + '.' + row['Primary Key'] +','
output = output[:-1]
output += "]\n"
return output
def creatiing_schema(DATASET_JSON):
schema_df = pd.read_json(DATASET_JSON)
schema_df = schema_df.drop(['column_names','table_names'], axis=1)
schema = []
f_keys = []
p_keys = []
for index, row in schema_df.iterrows():
tables = row['table_names_original']
col_names = row['column_names_original']
col_types = row['column_types']
foreign_keys = row['foreign_keys']
primary_keys = row['primary_keys']
for col, col_type in zip(col_names, col_types):
index, col_name = col
if index == -1:
for table in tables:
schema.append([row['db_id'], table, '*', 'text'])
else:
schema.append([row['db_id'], tables[index], col_name, col_type])
for primary_key in primary_keys:
index, column = col_names[primary_key]
p_keys.append([row['db_id'], tables[index], column])
for foreign_key in foreign_keys:
first, second = foreign_key
first_index, first_column = col_names[first]
second_index, second_column = col_names[second]
f_keys.append([row['db_id'], tables[first_index], tables[second_index], first_column, second_column])
spider_schema = pd.DataFrame(schema, columns=['Database name', ' Table Name', ' Field Name', ' Type'])
spider_primary = pd.DataFrame(p_keys, columns=['Database name', 'Table Name', 'Primary Key'])
spider_foreign = pd.DataFrame(f_keys,
columns=['Database name', 'First Table Name', 'Second Table Name', 'First Table Foreign Key',
'Second Table Foreign Key'])
return spider_schema,spider_primary,spider_foreign
def SQL_checker(sql, database):
# sql be like: "SELECT * FROM car_1 WHERE car_1.id = 1"
# database is the path to local xxx.sqlite
# the function of this part is to check if the sql is valid, if not, return the error message
path = DATABASE_PATH + '/' + database + '/' + database + '.sqlite'
try:
# Connect to the SQLite database
conn = sqlite3.connect(path)
# Create a cursor object to execute the SQL query
cursor = conn.cursor()
# Execute the SQL query
cursor.execute(sql)
# Commit the transaction and close the connection
conn.commit()
conn.close()
# Return a success message if the SQL query is valid
prompt = "The SQL query is valid in grammar."
checker = False
except sqlite3.Error as e:
# Return the error message if the SQL query is not valid
instruction = f"""#### the sql generated by you: {sql}, has error like :{e} , please fix the error and generate again. \n"""
fields = find_fields_MYSQL_like(database)
fields += "Foreign_keys = " + find_foreign_keys_MYSQL_like(database) + '\n'
fields += "Primary_keys = " + find_primary_keys_MYSQL_like(database)
prompt = instruction + fields + checker_prompt
checker = True
return prompt, checker
import time
def GPT4_generation(prompt):
limit_marker = False
fake_SQL = "SELECT COUNT(*) FROM singer"
while True:
try:
response = openai.ChatCompletion.create(
model=model_name,
messages=[{"role": "user", "content": prompt}],
n = 1,
stream = False,
temperature=0.0,
max_tokens=600,
top_p = 1.0,
frequency_penalty=0.0,
presence_penalty=0.0,
)
return response['choices'][0]['message']['content'], limit_marker
except openai.error.RateLimitError as e:
print(f"RateLimitError: {e}")
print("Sleeping for 20 seconds...")
time.sleep(20)
print("Retrying...")
except Exception as e:
print(f"Unexpected error: {e}")
return fake_SQL, limit_marker
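# Illustrative sketch (never invoked): the regenerate-until-valid loop that SQL_checker is built
# for. `database` is a hypothetical database id with a local SQLite copy under DATABASE_PATH;
# the attempt limit is an arbitrary choice.
def _example_check_and_retry(prompt, database, max_attempts=3):
    sql, _ = GPT4_generation(prompt)
    for _ in range(max_attempts):
        feedback, has_error = SQL_checker(sql, database)
        if not has_error:  # the SQL executed successfully against the local database
            return sql
        sql, _ = GPT4_generation(feedback)  # resend the error-feedback prompt and try again
    return sql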
# set up the Bard chatbot (answer parsing below, plus the account tokens it rotates through)
def extract_sql(response):
matches = re.findall(r'```sql\n(.*?)\n```', response, re.DOTALL)
return matches
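# Quick illustration (hypothetical response text, never executed): extract_sql pulls the body of a
# ```sql ... ``` fence out of a chat answer.
def _example_extract_sql():
    demo = "Sure, here you go:\n```sql\nSELECT count(*) FROM singer\n```"
    assert extract_sql(demo) == ["SELECT count(*) FROM singer"]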
tokens=(
"WwiJN0oLURBx7gX_O8WVz9Fufj1iefdzkpt2fsbsb-e8al2Kvufapnj5mYa6vGo5P1ub9w.",
"WwhXnsbFLxozhOKG1-NUO78iif9IiN5El3Qk9yk5fi70TMcaUMOwfWwjTyqAyNe6MCtiEA.",
"Wwi1wxVyz-X2piJk8Ts84d08Fm1UmHDTOS7ftlD6LCXdbUVjFrQlJfl97an8UHhZQM8juQ.",
"Wwj6xMcUvzQUaKwcRQ-qvwrIcZLDBRp9XP25HkEVBAJDVZBzujepzI_dttehdJiCAjCIMg.",
"WwjMZ_TL9xIl4jREPppT5df6tAsjLLgjRo_GKK5iLslGOh5lMtstOMP_iJEADXq6gjFEKA.",
"Wgj-oa5yHxfmjo0lLybtWGLiWYoKTZ07NXcUiaPiUHmtQQiAKlfzNTOA9lwqmCz2N0qGFg."
)
def Bard_generation(prompt):
limit_marker = False
token_index = 0
chatbot = Chatbot(tokens[token_index])
answer = chatbot.ask(prompt)
while True: # This loop will continue until a string is returned
if isinstance(answer, dict): # check if answer is a dictionary (error response)
limit_marker = True
            time.sleep(15) # back off for 15 seconds before switching to the next token
token_index += 1 # Move to the next token
if token_index >= len(tokens): # If we've used all tokens, start over
token_index = 0
chatbot = Chatbot(tokens[token_index]) # Create a new chatbot with the new token
answer = chatbot.ask(prompt) # resend the request
        else:
            SQL = answer[4][0][1] # This is a list based on your latest context
            if isinstance(SQL, list) and SQL: # if SQL is a non-empty list
                sql_string = SQL[0] # get the first element of the list
                sql_query = extract_sql(sql_string) # extract sql query
                if sql_query: # if successfully extracted
                    return sql_query[0], limit_marker
                return sql_string, limit_marker # no ```sql``` fence found, fall back to the raw answer text
            return "", limit_marker # unexpected answer structure, return empty instead of looping forever
def save_breaker(breaker):
with open("breaker.txt", "w") as f:
f.write(str(breaker))
# Function to load the breaker value from a file
def load_breaker():
    if os.path.exists("breaker.txt"):
        with open("breaker.txt", "r") as f:
            breaker = int(f.read())
        if breaker > 1037:  # stored index is out of range, start over from the beginning
            breaker = 0
        return breaker
    return 0
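# Illustrative sketch (not wired into the loop below): the breaker file is meant to make a long run
# resumable, i.e. skip samples that were already processed and checkpoint progress after each one.
# The skip rule shown here is an assumption about how the checkpoint would be consumed.
def _example_resume_loop(dataframe):
    breaker = load_breaker()
    for index, sample in dataframe.iterrows():
        if breaker > 0 and index <= breaker:  # already handled in a previous run
            continue
        # ... process `sample` here ...
        save_breaker(index)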
if __name__ == '__main__':
###########################################################################################
# load the data
spider_schema,spider_primary,spider_foreign = creatiing_schema(DATASET_SCHEMA)
val_df = load_data(DATASET)
Log_content = []
for index,sample in val_df.iterrows():
print('index:',index)
db_id = sample['database_id'] # e.g.'car_1'
question_final = sample['final']['utterance'] # e.g.'How many car models are produced by each maker? List the count and the maker full name.'
        query_final = sample['final']['query'] # gold SQL string for the final utterance
schema = find_fields_MYSQL_like(db_id) + '\n' + "foreign key:" + find_foreign_keys_MYSQL_like(
db_id) + '\n' + "primary key:" + find_primary_keys_MYSQL_like(db_id) #
'''
schema: Table car_makers, columns = [*,Id,Maker,FullName,Country]
Table car_names, columns = [*,MakeId,Model,Make]
Table cars_data, columns = [*,Id,MPG,Cylinders,Edispl,Horsepower,Weight,Accelerate,Year]
Table continents, columns = [*,ContId,Continent]
Table countries, columns = [*,CountryId,CountryName,Continent]
Table model_list, columns = [*,ModelId,Maker,Model]
foreign key:[countries.Continent = continents.ContId,car_makers.Country = countries.CountryId,model_list.Maker = car_makers.Id,car_names.Model = model_list.Model,cars_data.Id = car_names.MakeId]
primary key:[continents.ContId,countries.CountryId,car_makers.Id,model_list.ModelId,car_names.MakeId,cars_data.Id]
'''
# for first round:
# input: question+db_id+schema+three_sqls
# output: sql
# for other rounds and final round:
# input: question + message + generated_sql
# output: sql
message = ''
old_message = ''
history = {}
tmp = {}
SQLs_temp_pred = []
SQLs_temp_gold = []
tmp['question'] = question_final
        for round, dialog in enumerate(sample['interaction']): # assume the goal is to output the final sql by using the final question and the dialog information
print(f'The {round} round of dialog in sample {index}:') # each sample has at least 1 previous conversation
question_round = dialog['utterance']
query_round = dialog['query']
if round == 0:
                old_message = message + \
                              SQL_generation_prompt_Bard + \
                              "\ndatabase:" + db_id + \
                              "\ndatabase schema:" + schema + \
                              "\nSome samples to text2sql:" + one_shot_Cosql_prompt_without_explain
                message = message + \
                          SQL_generation_prompt_Bard + \
                          "\ndatabase:" + db_id + \
                          "\ndatabase schema:" + schema + \
                          "\nSome samples to text2sql:" + one_shot_Cosql_prompt_without_explain + \
                          "\nQuestion:" + question_round + \
                          "\nOutput:"
else:
                message = old_message + \
                          Contextual_prompt + \
                          "\nThis is previous question:" + history['question'] + \
                          "\nThis is your previous generated SQL:" + history['query'] + \
                          "\nQuestion:" + question_round + \
                          "\nOutput:"
                old_message = old_message + \
                              "\nThis is previous question:" + history['question'] + \
                              "\nThis is your previous generated SQL:" + history['query']
print('message:',message)
SQL, limit_marker = Bard_generation(message)
print('SQL:',SQL)
SQL = SQL.replace('\n',' ')
            print('\nBard generated SQL:', SQL + '\n')
history['question'] = question_round
history['query'] = SQL
            '''
            Save the log, the generated SQL and the gold SQL to files. The raw response can span
            several lines, e.g.:
            SELECT car_names.Model, COUNT(cars_data.Id) AS popularity
            FROM car_names
            JOIN cars_data ON cars_data.Id = car_names.MakeId
            GROUP BY car_names.Model
            ORDER BY popularity DESC;
            so the newlines are replaced with spaces above before the query is written out.
            '''
SQLs_temp_pred.append(SQL)
SQLs_temp_gold.append(query_round+'\t'+db_id)
        # after all rounds of this dialog, dump the per-round predictions and gold queries to file
with open ('./predicted_sql.txt','a') as f:
for line in SQLs_temp_pred:
f.write(line+'\n')
with open ('./gold_sql.txt','a') as f:
for line in SQLs_temp_gold:
f.write(line+'\n')
# CUDA_VISIBLE_DEVICES=7 python read_cosql.py | [
"PLACEHOLDERPLACEHOLDERPLACEHOLDER",
"\nPlease help me generate the corresponding SQL query with no further explaination.\n",
"\nSorry, I won't give you any examples. Please generate based on your own semantic parsing ability.\n",
"The SQL query is valid in grammar.",
"\nHelp me convert natural language questions to SQL, don't explain the functionality of the SQL query.\n",
"\nYou are an expert in SQL. I will give you a natural language question and a database schema, \nplease help me generate the corresponding SQL query with no further explaination.\n",
"\nHere is some examples of EASY, MEDIUM and HARD SQL queries.\nSELECT count(*) FROM singer \nSELECT avg(weight) , pettype FROM pets GROUP BY pettype\nSELECT T1.fname , T1.age FROM student AS T1 JOIN has_pet AS T2 ON T1.stuid = T2.stuid JOIN pets AS T3 ON T3.petid = T2.petid WHERE T3.pettype = 'dog' AND T1.stuid NOT IN (SELECT T1.stuid FROM student AS T1 JOIN has_pet AS T2 ON T1.stuid = T2.stuid JOIN pets AS T3 ON T3.petid = T2.petid WHERE T3.pettype = 'cat')\n",
"\nNow I will give you some context (question and your own answer). Please generate the corresponding SQL query with no further explaination.\n",
"\nHere is a sample of multi-turn text2sql for you to understand the task.\nTable advisor, columns = [*,s_ID,i_ID]\nTable classroom, columns = [*,building,room_number,capacity]\nTable course, columns = [*,course_id,title,dept_name,credits]\nTable department, columns = [*,dept_name,building,budget]\nTable instructor, columns = [*,ID,name,dept_name,salary]\nTable prereq, columns = [*,course_id,prereq_id]\nTable section, columns = [*,course_id,sec_id,semester,year,building,room_number,time_slot_id]\nTable student, columns = [*,ID,name,dept_name,tot_cred]\nTable takes, columns = [*,ID,course_id,sec_id,semester,year,grade]\nTable teaches, columns = [*,ID,course_id,sec_id,semester,year]\nTable time_slot, columns = [*,time_slot_id,day,start_hr,start_min,end_hr,end_min]\n\nforeign key:[course.dept_name = department.dept_name,instructor.dept_name = department.dept_name,section.building = classroom.building,section.room_number = classroom.room_number,section.course_id = course.course_id,teaches.ID = instructor.ID,teaches.course_id = section.course_id,teaches.sec_id = section.sec_id,teaches.semester = section.semester,teaches.year = section.year,student.dept_name = department.dept_name,takes.ID = student.ID,takes.course_id = section.course_id,takes.sec_id = section.sec_id,takes.semester = section.semester,takes.year = section.year,advisor.s_ID = student.ID,advisor.i_ID = instructor.ID,prereq.prereq_id = course.course_id,prereq.course_id = course.course_id]\nprimary key:[classroom.building,department.dept_name,course.course_id,instructor.ID,section.course_id,teaches.ID,student.ID,takes.ID,advisor.s_ID,time_slot.time_slot_id,prereq.course_id]\n\nIteration 1:\nQuestion: Find out the average salary of professors?\nSELECT avg ( salary ) FROM instructor\n\nIteration 2: # iteration 2 will see the question and sql in iteration 1\nQuestion: Find the average salary of the professors of each department?\nSELECT avg ( salary ) , dept_name FROM instructor GROUP BY dept_name\n\nIteration 3: # iteration 3 will see the questiones and sqls in iteration 2 and 1\nQuestion: Which department has the highest average salary of professors?\nSELECT dept_name FROM instructor GROUP BY dept_name ORDER BY avg ( salary ) DESC LIMIT 1\n\n"
] |
2024-01-10 | YanJiaHuan/Text2Sql | multi_turn~Bard_GPT~V0~temp.py | import openai
import tiktoken
import os
MAX_TOKENS = 4096
# model_name = "gpt-3.5-turbo"
# model_name = "gpt-4"
model_name = "gpt-3.5-turbo-16k"
encoding = tiktoken.encoding_for_model(model_name)
# API_KEY = "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" # 自己的
API_KEY = "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" # 买的
# API_KEY = "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" # gpt4 孙哥
os.environ["OPENAI_API_KEY"] = API_KEY
openai.api_key = os.getenv("OPENAI_API_KEY")
def GPT4_generation(prompt):
'''
openai.error.RateLimitError: Rate limit reached for default-gpt-3.5-turbo
in organization org-GFmlumrCZBB2Y40fVv7f8qgp on requests per min. Limit: 3 / min.
Please try again in 20s. Contact us through our help center at help.openai.com if you continue to have issues.
Please add a payment method to your account to increase your rate limit.
Visit https://platform.openai.com/account/billing to add a payment method.
'''
response = openai.ChatCompletion.create(
model=model_name,
messages=[{"role": "user", "content": prompt}],
n = 1,
stream = False,
temperature=0.0,
max_tokens=600,
top_p = 1.0,
frequency_penalty=0.0,
presence_penalty=0.0,
)
return response['choices'][0]['message']['content']
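# Illustrative sketch (not called below): a thin retry wrapper around GPT4_generation for the
# rate-limit situation quoted in the docstring above. The 20-second back-off follows the
# "Please try again in 20s" hint; the attempt count is an arbitrary choice.
import time

def GPT4_generation_with_retry(prompt, max_attempts=5):
    for attempt in range(max_attempts):
        try:
            return GPT4_generation(prompt)
        except openai.error.RateLimitError:
            time.sleep(20)  # wait out the per-minute rate limit before retrying
    raise RuntimeError("GPT4_generation kept hitting the rate limit")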
message = '''
"Database stats: {'law': {'avg_tokens': 78.0057361376673, 'num_tables': 30, 'num_columns': 235, 'condition_count': {'orderBy': 81, 'groupBy': 239, 'having': 94, 'nested': 37, 'join': 484}}, 'thriller_novels': {'avg_tokens': 78.9419795221843, 'num_tables': 19, 'num_columns': 78, 'condition_count': {'orderBy': 35, 'groupBy': 139, 'having': 108, 'nested': 18, 'join': 288}}, 'fitness': {'avg_tokens': 73.64882943143813, 'num_tables': 22, 'num_columns': 127, 'condition_count': {'orderBy': 66, 'groupBy': 225, 'having': 4, 'nested': 81, 'join': 279}}, 'copywriting': {'avg_tokens': 51.48913043478261, 'num_tables': 12, 'num_columns': 45, 'condition_count': {'orderBy': 16, 'groupBy': 70, 'having': 30, 'nested': 2, 'join': 91}}, 'public_relations': {'avg_tokens': 63.68518518518518, 'num_tables': 44, 'num_columns': 240, 'condition_count': {'orderBy': 7, 'groupBy': 14, 'having': 8, 'nested': 17, 'join': 461}}, 'music': {'avg_tokens': 81.69047619047619, 'num_tables': 11, 'num_columns': 43, 'condition_count': {'orderBy': 34, 'groupBy': 224, 'having': 150, 'nested': 146, 'join': 250}}, 'project_management': {'avg_tokens': 61.624605678233436, 'num_tables': 26, 'num_columns': 181, 'condition_count': {'orderBy': 46, 'groupBy': 220, 'having': 13, 'nested': 62, 'join': 544}}, 'psychotherapy': {'avg_tokens': 91.22185430463576, 'num_tables': 9, 'num_columns': 66, 'condition_count': {'orderBy': 82, 'groupBy': 166, 'having': 51, 'nested': 48, 'join': 293}}, 'nursing': {'avg_tokens': 79.61953727506426, 'num_tables': 35, 'num_columns': 180, 'condition_count': {'orderBy': 30, 'groupBy': 57, 'having': 18, 'nested': 53, 'join': 363}}, 'philosophy': {'avg_tokens': 114.70414201183432, 'num_tables': 12, 'num_columns': 51, 'condition_count': {'orderBy': 0, 'groupBy': 14, 'having': 14, 'nested': 22, 'join': 166}}, 'education': {'avg_tokens': 66.6, 'num_tables': 26, 'num_columns': 68, 'condition_count': {'orderBy': 18, 'groupBy': 144, 'having': 89, 'nested': 72, 'join': 178}}, 'event_management': {'avg_tokens': 89.98163265306123, 'num_tables': 44, 'num_columns': 235, 'condition_count': {'orderBy': 187, 'groupBy': 345, 'having': 34, 'nested': 41, 'join': 434}}, 'personal_training': {'avg_tokens': 89.56218905472637, 'num_tables': 73, 'num_columns': 339, 'condition_count': {'orderBy': 46, 'groupBy': 179, 'having': 95, 'nested': 20, 'join': 357}}, 'finance': {'avg_tokens': 26.474537037037038, 'num_tables': 34, 'num_columns': 306, 'condition_count': {'orderBy': 7, 'groupBy': 25, 'having': 2, 'nested': 20, 'join': 31}}, 'painting': {'avg_tokens': 98.07296137339056, 'num_tables': 23, 'num_columns': 133, 'condition_count': {'orderBy': 5, 'groupBy': 9, 'having': 5, 'nested': 110, 'join': 461}}, 'comedy_films': {'avg_tokens': 103.91037735849056, 'num_tables': 22, 'num_columns': 38, 'condition_count': {'orderBy': 5, 'groupBy': 180, 'having': 164, 'nested': 209, 'join': 185}}, 'food_and_drink_books': {'avg_tokens': 79.81553398058253, 'num_tables': 23, 'num_columns': 33, 'condition_count': {'orderBy': 2, 'groupBy': 22, 'having': 17, 'nested': 16, 'join': 101}}, 'western_films': {'avg_tokens': 84.76744186046511, 'num_tables': 34, 'num_columns': 100, 'condition_count': {'orderBy': 60, 'groupBy': 56, 'having': 14, 'nested': 12, 'join': 113}}, 'forensic_psychology': {'avg_tokens': 76.35258358662614, 'num_tables': 49, 'num_columns': 255, 'condition_count': {'orderBy': 16, 'groupBy': 61, 'having': 10, 'nested': 71, 'join': 263}}, 'shipping_and_logistics': {'avg_tokens': 59.16417910447761, 'num_tables': 19, 'num_columns': 58, 'condition_count': 
{'orderBy': 11, 'groupBy': 60, 'having': 13, 'nested': 71, 'join': 86}}, 'science_fiction_films': {'avg_tokens': 68.64588528678304, 'num_tables': 26, 'num_columns': 56, 'condition_count': {'orderBy': 100, 'groupBy': 245, 'having': 124, 'nested': 139, 'join': 357}}, 'mobile_app_development': {'avg_tokens': 41.57575757575758, 'num_tables': 18, 'num_columns': 133, 'condition_count': {'orderBy': 15, 'groupBy': 17, 'having': 4, 'nested': 80, 'join': 311}}, 'travel': {'avg_tokens': 67.49659863945578, 'num_tables': 42, 'num_columns': 230, 'condition_count': {'orderBy': 46, 'groupBy': 55, 'having': 3, 'nested': 3, 'join': 134}}, 'business_intelligence': {'avg_tokens': 62.399253731343286, 'num_tables': 30, 'num_columns': 253, 'condition_count': {'orderBy': 14, 'groupBy': 130, 'having': 66, 'nested': 37, 'join': 464}}, 'accounting': {'avg_tokens': 42.649289099526065, 'num_tables': 41, 'num_columns': 246, 'condition_count': {'orderBy': 144, 'groupBy': 136, 'having': 14, 'nested': 19, 'join': 237}}, 'fictional_art': {'avg_tokens': 45.570754716981135, 'num_tables': 27, 'num_columns': 182, 'condition_count': {'orderBy': 14, 'groupBy': 45, 'having': 18, 'nested': 40, 'join': 147}}, 'user_experience_(ux)_design': {'avg_tokens': 44.04448742746615, 'num_tables': 26, 'num_columns': 176, 'condition_count': {'orderBy': 4, 'groupBy': 159, 'having': 19, 'nested': 130, 'join': 240}}, 'health_and_wellness_programs': {'avg_tokens': 92.60196078431373, 'num_tables': 35, 'num_columns': 153, 'condition_count': {'orderBy': 198, 'groupBy': 336, 'having': 108, 'nested': 35, 'join': 448}}, 'industrial_design': {'avg_tokens': 31.670212765957448, 'num_tables': 22, 'num_columns': 119, 'condition_count': {'orderBy': 14, 'groupBy': 22, 'having': 4, 'nested': 26, 'join': 34}}, 'network_security': {'avg_tokens': 79.57603686635944, 'num_tables': 18, 'num_columns': 169, 'condition_count': {'orderBy': 79, 'groupBy': 1, 'having': 1, 'nested': 0, 'join': 189}}, 'user_interface_design': {'avg_tokens': 55.40972222222222, 'num_tables': 21, 'num_columns': 137, 'condition_count': {'orderBy': 39, 'groupBy': 187, 'having': 63, 'nested': 167, 'join': 228}}, 'cooking_shows': {'avg_tokens': 48.329787234042556, 'num_tables': 24, 'num_columns': 49, 'condition_count': {'orderBy': 5, 'groupBy': 90, 'having': 72, 'nested': 35, 'join': 145}}, 'hypnotherapy': {'avg_tokens': 72.82211538461539, 'num_tables': 26, 'num_columns': 166, 'condition_count': {'orderBy': 163, 'groupBy': 227, 'having': 41, 'nested': 34, 'join': 349}}, 'operations_management': {'avg_tokens': 63.74, 'num_tables': 51, 'num_columns': 329, 'condition_count': {'orderBy': 119, 'groupBy': 207, 'having': 57, 'nested': 27, 'join': 505}}, 'cybersecurity': {'avg_tokens': 65.7202380952381, 'num_tables': 51, 'num_columns': 260, 'condition_count': {'orderBy': 35, 'groupBy': 25, 'having': 14, 'nested': 4, 'join': 277}}, 'cooking': {'avg_tokens': 62.67479674796748, 'num_tables': 16, 'num_columns': 53, 'condition_count': {'orderBy': 11, 'groupBy': 80, 'having': 63, 'nested': 32, 'join': 118}}, 'homeopathy': {'avg_tokens': 62.77325581395349, 'num_tables': 10, 'num_columns': 43, 'condition_count': {'orderBy': 0, 'groupBy': 1, 'having': 0, 'nested': 13, 'join': 105}}, 'civil_engineering': {'avg_tokens': 71.6875, 'num_tables': 44, 'num_columns': 233, 'condition_count': {'orderBy': 16, 'groupBy': 103, 'having': 35, 'nested': 54, 'join': 509}}, 'game_shows': {'avg_tokens': 111.21985815602837, 'num_tables': 22, 'num_columns': 75, 'condition_count': {'orderBy': 2, 'groupBy': 131, 'having': 113, 'nested': 
74, 'join': 140}}, 'jazz': {'avg_tokens': 80.78853046594982, 'num_tables': 28, 'num_columns': 49, 'condition_count': {'orderBy': 0, 'groupBy': 95, 'having': 95, 'nested': 277, 'join': 0}}, 'sculpting': {'avg_tokens': 94.89001692047377, 'num_tables': 28, 'num_columns': 63, 'condition_count': {'orderBy': 10, 'groupBy': 69, 'having': 52, 'nested': 146, 'join': 584}}, 'computer_engineering': {'avg_tokens': 57.48913043478261, 'num_tables': 22, 'num_columns': 59, 'condition_count': {'orderBy': 7, 'groupBy': 64, 'having': 52, 'nested': 61, 'join': 62}}, 'email_marketing': {'avg_tokens': 79.49731182795699, 'num_tables': 19, 'num_columns': 116, 'condition_count': {'orderBy': 61, 'groupBy': 116, 'having': 6, 'nested': 90, 'join': 261}}, 'retail': {'avg_tokens': 59.81981981981982, 'num_tables': 28, 'num_columns': 57, 'condition_count': {'orderBy': 12, 'groupBy': 93, 'having': 47, 'nested': 24, 'join': 103}}, 'triathlon': {'avg_tokens': 54.492063492063494, 'num_tables': 10, 'num_columns': 42, 'condition_count': {'orderBy': 26, 'groupBy': 45, 'having': 11, 'nested': 3, 'join': 59}}, 'industrial_engineering': {'avg_tokens': 70.21212121212122, 'num_tables': 18, 'num_columns': 65, 'condition_count': {'orderBy': 44, 'groupBy': 43, 'having': 33, 'nested': 22, 'join': 48}}, 'food_and_drink': {'avg_tokens': 50.36065573770492, 'num_tables': 37, 'num_columns': 221, 'condition_count': {'orderBy': 127, 'groupBy': 77, 'having': 9, 'nested': 216, 'join': 212}}, 'cognitive_psychology': {'avg_tokens': 88.24096385542168, 'num_tables': 20, 'num_columns': 53, 'condition_count': {'orderBy': 29, 'groupBy': 82, 'having': 38, 'nested': 4, 'join': 82}}, 'smart_transportation': {'avg_tokens': 43.25833333333333, 'num_tables': 15, 'num_columns': 43, 'condition_count': {'orderBy': 23, 'groupBy': 90, 'having': 12, 'nested': 7, 'join': 110}}, 'environment': {'avg_tokens': 75.4055944055944, 'num_tables': 42, 'num_columns': 231, 'condition_count': {'orderBy': 178, 'groupBy': 45, 'having': 0, 'nested': 0, 'join': 279}}, 'technology': {'avg_tokens': 70.96703296703296, 'num_tables': 15, 'num_columns': 64, 'condition_count': {'orderBy': 54, 'groupBy': 184, 'having': 104, 'nested': 100, 'join': 259}}, 'home_improvement': {'avg_tokens': 68.68316831683168, 'num_tables': 17, 'num_columns': 60, 'condition_count': {'orderBy': 2, 'groupBy': 23, 'having': 21, 'nested': 77, 'join': 94}}, 'war_films': {'avg_tokens': 109.1025, 'num_tables': 24, 'num_columns': 121, 'condition_count': {'orderBy': 93, 'groupBy': 131, 'having': 53, 'nested': 8, 'join': 397}}, 'bike_sharing': {'avg_tokens': 46.177570093457945, 'num_tables': 11, 'num_columns': 101, 'condition_count': {'orderBy': 48, 'groupBy': 99, 'having': 28, 'nested': 68, 'join': 41}}, 'blockchain_technology': {'avg_tokens': 72.26627218934911, 'num_tables': 36, 'num_columns': 172, 'condition_count': {'orderBy': 305, 'groupBy': 383, 'having': 96, 'nested': 58, 'join': 460}}, 'martial_arts': {'avg_tokens': 33.43312101910828, 'num_tables': 16, 'num_columns': 65, 'condition_count': {'orderBy': 27, 'groupBy': 38, 'having': 1, 'nested': 10, 'join': 55}}, 'dance': {'avg_tokens': 50.10958904109589, 'num_tables': 42, 'num_columns': 214, 'condition_count': {'orderBy': 22, 'groupBy': 38, 'having': 5, 'nested': 10, 'join': 52}}, 'search_engine_optimization_(seo)': {'avg_tokens': 42.441295546558706, 'num_tables': 27, 'num_columns': 192, 'condition_count': {'orderBy': 75, 'groupBy': 17, 'having': 1, 'nested': 78, 'join': 223}}, 'marketing_books': {'avg_tokens': 80.30412371134021, 'num_tables': 22, 'num_columns': 
128, 'condition_count': {'orderBy': 59, 'groupBy': 283, 'having': 113, 'nested': 246, 'join': 350}}, 'risk_management': {'avg_tokens': 51.08510638297872, 'num_tables': 17, 'num_columns': 68, 'condition_count': {'orderBy': 12, 'groupBy': 30, 'having': 0, 'nested': 0, 'join': 43}}, 'journaling': {'avg_tokens': 69.67721518987342, 'num_tables': 22, 'num_columns': 42, 'condition_count': {'orderBy': 10, 'groupBy': 82, 'having': 67, 'nested': 37, 'join': 151}}, 'web_development': {'avg_tokens': 59.66019417475728, 'num_tables': 28, 'num_columns': 56, 'condition_count': {'orderBy': 9, 'groupBy': 61, 'having': 37, 'nested': 27, 'join': 100}}, 'intelligent_public_safety': {'avg_tokens': 92.51626016260163, 'num_tables': 45, 'num_columns': 224, 'condition_count': {'orderBy': 11, 'groupBy': 110, 'having': 4, 'nested': 12, 'join': 219}}, 'meditation': {'avg_tokens': 72.8173076923077, 'num_tables': 17, 'num_columns': 37, 'condition_count': {'orderBy': 15, 'groupBy': 91, 'having': 25, 'nested': 5, 'join': 101}}, 'photography': {'avg_tokens': 51.037837837837834, 'num_tables': 29, 'num_columns': 181, 'condition_count': {'orderBy': 34, 'groupBy': 80, 'having': 32, 'nested': 16, 'join': 154}}, 'web_design': {'avg_tokens': 28.166666666666668, 'num_tables': 4, 'num_columns': 12, 'condition_count': {'orderBy': 1, 'groupBy': 4, 'having': 0, 'nested': 0, 'join': 8}}, 'dance_therapy': {'avg_tokens': 83.78846153846153, 'num_tables': 11, 'num_columns': 37, 'condition_count': {'orderBy': 0, 'groupBy': 12, 'having': 10, 'nested': 7, 'join': 51}}, 'astronomy': {'avg_tokens': 40.705078125, 'num_tables': 20, 'num_columns': 324, 'condition_count': {'orderBy': 14, 'groupBy': 6, 'having': 3, 'nested': 274, 'join': 47}}, 'computer_hardware': {'avg_tokens': 39.079268292682926, 'num_tables': 15, 'num_columns': 35, 'condition_count': {'orderBy': 28, 'groupBy': 87, 'having': 22, 'nested': 23, 'join': 126}}, 'human_resources_management': {'avg_tokens': 91.70652173913044, 'num_tables': 17, 'num_columns': 77, 'condition_count': {'orderBy': 288, 'groupBy': 301, 'having': 15, 'nested': 29, 'join': 339}}, 'surgery': {'avg_tokens': 65.23489932885906, 'num_tables': 16, 'num_columns': 184, 'condition_count': {'orderBy': 79, 'groupBy': 68, 'having': 14, 'nested': 20, 'join': 104}}, 'medicine': {'avg_tokens': 53.857142857142854, 'num_tables': 28, 'num_columns': 105, 'condition_count': {'orderBy': 1, 'groupBy': 3, 'having': 0, 'nested': 0, 'join': 11}}, 'writing_therapy': {'avg_tokens': 95.56831683168316, 'num_tables': 52, 'num_columns': 190, 'condition_count': {'orderBy': 39, 'groupBy': 233, 'having': 136, 'nested': 42, 'join': 486}}, 'data_analysis': {'avg_tokens': 69.18529411764706, 'num_tables': 37, 'num_columns': 230, 'condition_count': {'orderBy': 88, 'groupBy': 245, 'having': 111, 'nested': 64, 'join': 277}}, 'interior_decorating': {'avg_tokens': 50.95652173913044, 'num_tables': 13, 'num_columns': 97, 'condition_count': {'orderBy': 34, 'groupBy': 83, 'having': 31, 'nested': 12, 'join': 199}}, 'internet_of_things_(iot)': {'avg_tokens': 76.71458333333334, 'num_tables': 22, 'num_columns': 119, 'condition_count': {'orderBy': 175, 'groupBy': 164, 'having': 9, 'nested': 19, 'join': 429}}, 'movies': {'avg_tokens': 72.78048780487805, 'num_tables': 12, 'num_columns': 34, 'condition_count': {'orderBy': 8, 'groupBy': 41, 'having': 15, 'nested': 49, 'join': 55}}, 'economics': {'avg_tokens': 50.16858237547893, 'num_tables': 34, 'num_columns': 189, 'condition_count': {'orderBy': 19, 'groupBy': 51, 'having': 14, 'nested': 15, 'join': 211}}, 
'basketball': {'avg_tokens': 78.6355748373102, 'num_tables': 32, 'num_columns': 367, 'condition_count': {'orderBy': 132, 'groupBy': 271, 'having': 192, 'nested': 91, 'join': 409}}, 'action_films': {'avg_tokens': 89.02995391705069, 'num_tables': 28, 'num_columns': 88, 'condition_count': {'orderBy': 104, 'groupBy': 225, 'having': 135, 'nested': 218, 'join': 396}}, 'illustration': {'avg_tokens': 78.50737463126843, 'num_tables': 36, 'num_columns': 138, 'condition_count': {'orderBy': 40, 'groupBy': 112, 'having': 43, 'nested': 52, 'join': 289}}, 'ecology': {'avg_tokens': 120.67168674698796, 'num_tables': 24, 'num_columns': 110, 'condition_count': {'orderBy': 5, 'groupBy': 96, 'having': 80, 'nested': 32, 'join': 317}}, 'virtual_event_planning': {'avg_tokens': 36.78072289156626, 'num_tables': 23, 'num_columns': 198, 'condition_count': {'orderBy': 9, 'groupBy': 27, 'having': 4, 'nested': 43, 'join': 60}}, 'acupuncture': {'avg_tokens': 87.00806451612904, 'num_tables': 11, 'num_columns': 54, 'condition_count': {'orderBy': 1, 'groupBy': 68, 'having': 65, 'nested': 43, 'join': 370}}, 'glass_etching': {'avg_tokens': 67.32491582491582, 'num_tables': 28, 'num_columns': 175, 'condition_count': {'orderBy': 254, 'groupBy': 304, 'having': 183, 'nested': 53, 'join': 383}}, 'cloud_computing': {'avg_tokens': 49.02127659574468, 'num_tables': 23, 'num_columns': 66, 'condition_count': {'orderBy': 46, 'groupBy': 109, 'having': 38, 'nested': 24, 'join': 182}}, 'business_strategy': {'avg_tokens': 81.68662674650699, 'num_tables': 45, 'num_columns': 194, 'condition_count': {'orderBy': 325, 'groupBy': 396, 'having': 155, 'nested': 36, 'join': 476}}, 'history': {'avg_tokens': 32.0, 'num_tables': 23, 'num_columns': 69, 'condition_count': {'orderBy': 35, 'groupBy': 0, 'having': 0, 'nested': 1, 'join': 51}}, 'epidemiology': {'avg_tokens': 71.21212121212122, 'num_tables': 17, 'num_columns': 71, 'condition_count': {'orderBy': 14, 'groupBy': 59, 'having': 18, 'nested': 16, 'join': 126}}, 'snowboarding': {'avg_tokens': 70.97410358565737, 'num_tables': 50, 'num_columns': 270, 'condition_count': {'orderBy': 65, 'groupBy': 213, 'having': 10, 'nested': 111, 'join': 298}}, 'content_marketing': {'avg_tokens': 38.111111111111114, 'num_tables': 26, 'num_columns': 155, 'condition_count': {'orderBy': 12, 'groupBy': 22, 'having': 6, 'nested': 3, 'join': 24}}, 'music_therapy': {'avg_tokens': 33.476190476190474, 'num_tables': 70, 'num_columns': 355, 'condition_count': {'orderBy': 17, 'groupBy': 20, 'having': 1, 'nested': 18, 'join': 35}}, 'intelligent_transportation_systems': {'avg_tokens': 74.82116788321167, 'num_tables': 26, 'num_columns': 146, 'condition_count': {'orderBy': 24, 'groupBy': 57, 'having': 6, 'nested': 30, 'join': 188}}, 'oceanography': {'avg_tokens': 25.286516853932586, 'num_tables': 42, 'num_columns': 96, 'condition_count': {'orderBy': 0, 'groupBy': 0, 'having': 0, 'nested': 30, 'join': 0}}, 'public_health': {'avg_tokens': 86.76158940397352, 'num_tables': 49, 'num_columns': 247, 'condition_count': {'orderBy': 62, 'groupBy': 114, 'having': 80, 'nested': 20, 'join': 232}}, 'forensic_science': {'avg_tokens': 64.23880597014926, 'num_tables': 18, 'num_columns': 70, 'condition_count': {'orderBy': 0, 'groupBy': 24, 'having': 23, 'nested': 0, 'join': 133}}, 'sports_officiating': {'avg_tokens': 80.38783269961978, 'num_tables': 42, 'num_columns': 131, 'condition_count': {'orderBy': 121, 'groupBy': 206, 'having': 23, 'nested': 2, 'join': 260}}, 'counseling': {'avg_tokens': 91.67955801104972, 'num_tables': 24, 'num_columns': 160, 
'condition_count': {'orderBy': 108, 'groupBy': 240, 'having': 65, 'nested': 28, 'join': 335}}, 'smart_lighting': {'avg_tokens': 113.11538461538461, 'num_tables': 31, 'num_columns': 161, 'condition_count': {'orderBy': 98, 'groupBy': 77, 'having': 12, 'nested': 47, 'join': 139}}, 'yoga_therapy': {'avg_tokens': 49.282051282051285, 'num_tables': 19, 'num_columns': 38, 'condition_count': {'orderBy': 5, 'groupBy': 28, 'having': 15, 'nested': 8, 'join': 70}}, 'pharmacy': {'avg_tokens': 68.40387722132472, 'num_tables': 39, 'num_columns': 241, 'condition_count': {'orderBy': 241, 'groupBy': 286, 'having': 57, 'nested': 204, 'join': 372}}, 'advertising': {'avg_tokens': 83.16448598130842, 'num_tables': 33, 'num_columns': 170, 'condition_count': {'orderBy': 17, 'groupBy': 155, 'having': 3, 'nested': 12, 'join': 479}}, 'graphic_novels': {'avg_tokens': 88.40163934426229, 'num_tables': 40, 'num_columns': 133, 'condition_count': {'orderBy': 15, 'groupBy': 98, 'having': 76, 'nested': 140, 'join': 416}}, 'romance_films': {'avg_tokens': 66.01269035532995, 'num_tables': 26, 'num_columns': 67, 'condition_count': {'orderBy': 42, 'groupBy': 3, 'having': 1, 'nested': 6, 'join': 383}}, 'stand-up_comedy': {'avg_tokens': 78.30058939096267, 'num_tables': 38, 'num_columns': 206, 'condition_count': {'orderBy': 16, 'groupBy': 131, 'having': 32, 'nested': 45, 'join': 464}}, 'geology': {'avg_tokens': 66.46268656716418, 'num_tables': 19, 'num_columns': 83, 'condition_count': {'orderBy': 0, 'groupBy': 16, 'having': 1, 'nested': 6, 'join': 127}}, 'business_development': {'avg_tokens': 39.723214285714285, 'num_tables': 14, 'num_columns': 44, 'condition_count': {'orderBy': 29, 'groupBy': 36, 'having': 10, 'nested': 38, 'join': 62}}, 'comic_books': {'avg_tokens': 103.51351351351352, 'num_tables': 45, 'num_columns': 107, 'condition_count': {'orderBy': 152, 'groupBy': 174, 'having': 44, 'nested': 102, 'join': 400}}, 'drama_films': {'avg_tokens': 57.57377049180328, 'num_tables': 15, 'num_columns': 37, 'condition_count': {'orderBy': 32, 'groupBy': 83, 'having': 39, 'nested': 11, 'join': 118}}, 'information_security': {'avg_tokens': 68.56862745098039, 'num_tables': 21, 'num_columns': 47, 'condition_count': {'orderBy': 7, 'groupBy': 21, 'having': 9, 'nested': 15, 'join': 47}}, 'gardening': {'avg_tokens': 44.51520912547529, 'num_tables': 23, 'num_columns': 219, 'condition_count': {'orderBy': 10, 'groupBy': 18, 'having': 4, 'nested': 27, 'join': 211}}, 'virtual_reality': {'avg_tokens': 122.95846645367412, 'num_tables': 31, 'num_columns': 84, 'condition_count': {'orderBy': 68, 'groupBy': 70, 'having': 12, 'nested': 141, 'join': 246}}, 'craft_beer_brewing': {'avg_tokens': 68.23140495867769, 'num_tables': 23, 'num_columns': 55, 'condition_count': {'orderBy': 6, 'groupBy': 65, 'having': 24, 'nested': 57, 'join': 100}}, 'ui_ux_design': {'avg_tokens': 58.99175824175824, 'num_tables': 21, 'num_columns': 145, 'condition_count': {'orderBy': 42, 'groupBy': 127, 'having': 73, 'nested': 190, 'join': 296}}, 'botany': {'avg_tokens': 99.32, 'num_tables': 56, 'num_columns': 241, 'condition_count': {'orderBy': 11, 'groupBy': 19, 'having': 5, 'nested': 21, 'join': 390}}, 'screenwriting': {'avg_tokens': 31.367346938775512, 'num_tables': 13, 'num_columns': 36, 'condition_count': {'orderBy': 9, 'groupBy': 25, 'having': 14, 'nested': 22, 'join': 42}}, 'sports_medicine': {'avg_tokens': 57.9236641221374, 'num_tables': 33, 'num_columns': 200, 'condition_count': {'orderBy': 41, 'groupBy': 91, 'having': 20, 'nested': 13, 'join': 111}}, 'big_data': {'avg_tokens': 
106.31428571428572, 'num_tables': 16, 'num_columns': 121, 'condition_count': {'orderBy': 12, 'groupBy': 220, 'having': 100, 'nested': 244, 'join': 344}}, 'musical_theater': {'avg_tokens': 75.50462107208872, 'num_tables': 39, 'num_columns': 239, 'condition_count': {'orderBy': 398, 'groupBy': 442, 'having': 70, 'nested': 28, 'join': 498}}, 'archaeology': {'avg_tokens': 58.99090909090909, 'num_tables': 28, 'num_columns': 133, 'condition_count': {'orderBy': 1, 'groupBy': 84, 'having': 3, 'nested': 0, 'join': 91}}, 'pilates': {'avg_tokens': 70.96041666666666, 'num_tables': 40, 'num_columns': 239, 'condition_count': {'orderBy': 149, 'groupBy': 334, 'having': 128, 'nested': 89, 'join': 413}}, 'data_mining': {'avg_tokens': 39.613995485327315, 'num_tables': 38, 'num_columns': 206, 'condition_count': {'orderBy': 52, 'groupBy': 177, 'having': 38, 'nested': 87, 'join': 223}}, 'marine_biology': {'avg_tokens': 43.89165186500888, 'num_tables': 30, 'num_columns': 216, 'condition_count': {'orderBy': 26, 'groupBy': 124, 'having': 3, 'nested': 14, 'join': 219}}, 'cosmetics_making': {'avg_tokens': 60.473794549266245, 'num_tables': 26, 'num_columns': 180, 'condition_count': {'orderBy': 129, 'groupBy': 242, 'having': 100, 'nested': 107, 'join': 412}}, 'mobile_app_design': {'avg_tokens': 80.5, 'num_tables': 18, 'num_columns': 34, 'condition_count': {'orderBy': 5, 'groupBy': 66, 'having': 50, 'nested': 53, 'join': 102}}, 'game_design': {'avg_tokens': 71.42736842105263, 'num_tables': 24, 'num_columns': 88, 'condition_count': {'orderBy': 7, 'groupBy': 57, 'having': 51, 'nested': 4, 'join': 355}}, 'high-speed_trains': {'avg_tokens': 47.72115384615385, 'num_tables': 29, 'num_columns': 281, 'condition_count': {'orderBy': 14, 'groupBy': 23, 'having': 4, 'nested': 9, 'join': 33}}, 'cardiovascular_training': {'avg_tokens': 31.133333333333333, 'num_tables': 15, 'num_columns': 38, 'condition_count': {'orderBy': 0, 'groupBy': 0, 'having': 0, 'nested': 29, 'join': 15}}, 'documentary_films': {'avg_tokens': 86.6829268292683, 'num_tables': 43, 'num_columns': 182, 'condition_count': {'orderBy': 9, 'groupBy': 15, 'having': 3, 'nested': 5, 'join': 403}}, 'business': {'avg_tokens': 81.501953125, 'num_tables': 36, 'num_columns': 226, 'condition_count': {'orderBy': 375, 'groupBy': 396, 'having': 95, 'nested': 32, 'join': 443}}, 'entomology': {'avg_tokens': 93.86190476190477, 'num_tables': 23, 'num_columns': 131, 'condition_count': {'orderBy': 62, 'groupBy': 74, 'having': 19, 'nested': 2, 'join': 192}}, 'data_analytics': {'avg_tokens': 70.15189873417721, 'num_tables': 27, 'num_columns': 62, 'condition_count': {'orderBy': 28, 'groupBy': 194, 'having': 68, 'nested': 8, 'join': 237}}, 'agriculture': {'avg_tokens': 59.526881720430104, 'num_tables': 13, 'num_columns': 67, 'condition_count': {'orderBy': 30, 'groupBy': 110, 'having': 55, 'nested': 73, 'join': 131}}, 'soapstone_carving': {'avg_tokens': 70.12338858195211, 'num_tables': 26, 'num_columns': 153, 'condition_count': {'orderBy': 326, 'groupBy': 405, 'having': 67, 'nested': 41, 'join': 443}}, 'boxing': {'avg_tokens': 83.93333333333334, 'num_tables': 17, 'num_columns': 71, 'condition_count': {'orderBy': 1, 'groupBy': 91, 'having': 35, 'nested': 31, 'join': 118}}, 'leadership': {'avg_tokens': 78.41233766233766, 'num_tables': 42, 'num_columns': 232, 'condition_count': {'orderBy': 78, 'groupBy': 100, 'having': 8, 'nested': 18, 'join': 249}}, 'sustainability': {'avg_tokens': 95.42560553633218, 'num_tables': 27, 'num_columns': 144, 'condition_count': {'orderBy': 210, 'groupBy': 234, 
'having': 63, 'nested': 55, 'join': 272}}, 'athletic_training': {'avg_tokens': 94.0730593607306, 'num_tables': 47, 'num_columns': 300, 'condition_count': {'orderBy': 68, 'groupBy': 298, 'having': 183, 'nested': 194, 'join': 388}}, 'interior_design': {'avg_tokens': 56.31404958677686, 'num_tables': 22, 'num_columns': 54, 'condition_count': {'orderBy': 78, 'groupBy': 82, 'having': 60, 'nested': 28, 'join': 105}}, 'talk_shows': {'avg_tokens': 81.28070175438596, 'num_tables': 12, 'num_columns': 51, 'condition_count': {'orderBy': 68, 'groupBy': 111, 'having': 66, 'nested': 88, 'join': 166}}, 'volleyball': {'avg_tokens': 140.7431693989071, 'num_tables': 23, 'num_columns': 61, 'condition_count': {'orderBy': 2, 'groupBy': 527, 'having': 518, 'nested': 24, 'join': 547}}, 'makeup_artistry': {'avg_tokens': 63.06060606060606, 'num_tables': 36, 'num_columns': 236, 'condition_count': {'orderBy': 86, 'groupBy': 135, 'having': 28, 'nested': 24, 'join': 162}}, 'coaching': {'avg_tokens': 40.443697478991595, 'num_tables': 24, 'num_columns': 159, 'condition_count': {'orderBy': 43, 'groupBy': 83, 'having': 17, 'nested': 34, 'join': 277}}, 'tv_shows': {'avg_tokens': 98.33707865168539, 'num_tables': 63, 'num_columns': 245, 'condition_count': {'orderBy': 10, 'groupBy': 144, 'having': 133, 'nested': 206, 'join': 301}}, 'augmented_reality': {'avg_tokens': 45.3421052631579, 'num_tables': 46, 'num_columns': 238, 'condition_count': {'orderBy': 32, 'groupBy': 64, 'having': 6, 'nested': 15, 'join': 56}}, 'anthropology': {'avg_tokens': 66.04255319148936, 'num_tables': 19, 'num_columns': 62, 'condition_count': {'orderBy': 15, 'groupBy': 43, 'having': 21, 'nested': 0, 'join': 127}}, 'business_books': {'avg_tokens': 84.71030042918454, 'num_tables': 35, 'num_columns': 166, 'condition_count': {'orderBy': 295, 'groupBy': 365, 'having': 25, 'nested': 25, 'join': 405}}, 'horror_films': {'avg_tokens': 97.93159609120521, 'num_tables': 56, 'num_columns': 201, 'condition_count': {'orderBy': 240, 'groupBy': 207, 'having': 59, 'nested': 12, 'join': 234}}, 'electronic_music': {'avg_tokens': 77.93483709273183, 'num_tables': 33, 'num_columns': 72, 'condition_count': {'orderBy': 112, 'groupBy': 322, 'having': 233, 'nested': 97, 'join': 394}}, 'sociology': {'avg_tokens': 92.0304347826087, 'num_tables': 28, 'num_columns': 76, 'condition_count': {'orderBy': 4, 'groupBy': 182, 'having': 98, 'nested': 120, 'join': 210}}, 'psychology_books': {'avg_tokens': 86.65876777251185, 'num_tables': 16, 'num_columns': 105, 'condition_count': {'orderBy': 205, 'groupBy': 252, 'having': 104, 'nested': 110, 'join': 392}}, 'energy_management': {'avg_tokens': 37.725806451612904, 'num_tables': 10, 'num_columns': 42, 'condition_count': {'orderBy': 22, 'groupBy': 23, 'having': 1, 'nested': 2, 'join': 0}}, 'news_programs': {'avg_tokens': 97.97849462365592, 'num_tables': 32, 'num_columns': 81, 'condition_count': {'orderBy': 25, 'groupBy': 89, 'having': 57, 'nested': 203, 'join': 246}}, 'country_music': {'avg_tokens': 60.895890410958906, 'num_tables': 31, 'num_columns': 59, 'condition_count': {'orderBy': 202, 'groupBy': 160, 'having': 67, 'nested': 46, 'join': 351}}, 'chiropractic': {'avg_tokens': 103.82035928143712, 'num_tables': 17, 'num_columns': 60, 'condition_count': {'orderBy': 0, 'groupBy': 0, 'having': 0, 'nested': 0, 'join': 166}}, 'developmental_psychology': {'avg_tokens': 90.43450479233226, 'num_tables': 22, 'num_columns': 77, 'condition_count': {'orderBy': 62, 'groupBy': 103, 'having': 19, 'nested': 106, 'join': 300}}, 'political_science': {'avg_tokens': 
38.54703832752613, 'num_tables': 29, 'num_columns': 165, 'condition_count': {'orderBy': 125, 'groupBy': 77, 'having': 17, 'nested': 110, 'join': 71}}, 'sports_psychology': {'avg_tokens': 100.74709976798144, 'num_tables': 26, 'num_columns': 158, 'condition_count': {'orderBy': 128, 'groupBy': 116, 'having': 8, 'nested': 36, 'join': 360}}, 'tai_chi': {'avg_tokens': 85.71348314606742, 'num_tables': 13, 'num_columns': 57, 'condition_count': {'orderBy': 1, 'groupBy': 31, 'having': 24, 'nested': 51, 'join': 173}}, 'energy': {'avg_tokens': 50.63522012578616, 'num_tables': 19, 'num_columns': 120, 'condition_count': {'orderBy': 5, 'groupBy': 133, 'having': 0, 'nested': 30, 'join': 141}}, 'fitness_instruction': {'avg_tokens': 38.02325581395349, 'num_tables': 30, 'num_columns': 242, 'condition_count': {'orderBy': 14, 'groupBy': 26, 'having': 2, 'nested': 0, 'join': 31}}, 'corporate_social_responsibility': {'avg_tokens': 76.6875, 'num_tables': 13, 'num_columns': 64, 'condition_count': {'orderBy': 1, 'groupBy': 131, 'having': 3, 'nested': 12, 'join': 177}}, 'genetics': {'avg_tokens': 64.12253829321664, 'num_tables': 43, 'num_columns': 207, 'condition_count': {'orderBy': 3, 'groupBy': 8, 'having': 6, 'nested': 11, 'join': 421}}}
### Desired output format:
law &
78 &
30 &
235 &
523 &
81 &
239 &
94 &
37 &
484 \\
...
### help me continue organizing the rest of the data in the same way
'''
# result = GPT4_generation(message)
# print(result)
import json
import ast
import statistics
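# Illustrative sketch (not executed here): instead of asking GPT to reformat the statistics, the
# dict literal held in `data` below can be parsed locally and printed in the "&"-separated row
# layout shown under "Desired output format" above. The column order and the rounding of
# avg_tokens are assumptions read off that example row.
def stats_to_latex_rows(stats_literal: str) -> str:
    stats = ast.literal_eval(stats_literal.strip())  # the payload is a Python dict repr, not JSON
    rows = []
    for name, info in stats.items():
        cond = info['condition_count']
        cells = [
            name,
            str(round(info['avg_tokens'])),
            str(info['num_tables']),
            str(info['num_columns']),
            str(info.get('sql_count', '')),  # present in `data` below, absent from the earlier dump
            str(cond['orderBy']),
            str(cond['groupBy']),
            str(cond['having']),
            str(cond['nested']),
            str(cond['join']),
        ]
        rows.append(" & ".join(cells) + r" \\")
    return "\n".join(rows)
# e.g. print(stats_to_latex_rows(data)) once `data` is defined below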
data = '''
{'law': {'avg_tokens': 78.0057361376673, 'num_tables': 30, 'num_columns': 235, 'sql_count': 523, 'condition_count': {'orderBy': 81, 'groupBy': 239, 'having': 94, 'nested': 37, 'join': 484}}, 'thriller_novels': {'avg_tokens': 78.9419795221843, 'num_tables': 19, 'num_columns': 78, 'sql_count': 293, 'condition_count': {'orderBy': 35, 'groupBy': 139, 'having': 108, 'nested': 18, 'join': 288}}, 'fitness': {'avg_tokens': 73.64882943143813, 'num_tables': 22, 'num_columns': 127, 'sql_count': 299, 'condition_count': {'orderBy': 66, 'groupBy': 225, 'having': 4, 'nested': 81, 'join': 279}}, 'copywriting': {'avg_tokens': 51.48913043478261, 'num_tables': 12, 'num_columns': 45, 'sql_count': 92, 'condition_count': {'orderBy': 16, 'groupBy': 70, 'having': 30, 'nested': 2, 'join': 91}}, 'public_relations': {'avg_tokens': 63.68518518518518, 'num_tables': 44, 'num_columns': 240, 'sql_count': 486, 'condition_count': {'orderBy': 7, 'groupBy': 14, 'having': 8, 'nested': 17, 'join': 461}}, 'music': {'avg_tokens': 81.69047619047619, 'num_tables': 11, 'num_columns': 43, 'sql_count': 252, 'condition_count': {'orderBy': 34, 'groupBy': 224, 'having': 150, 'nested': 146, 'join': 250}}, 'project_management': {'avg_tokens': 61.624605678233436, 'num_tables': 26, 'num_columns': 181, 'sql_count': 634, 'condition_count': {'orderBy': 46, 'groupBy': 220, 'having': 13, 'nested': 62, 'join': 544}}, 'psychotherapy': {'avg_tokens': 91.22185430463576, 'num_tables': 9, 'num_columns': 66, 'sql_count': 302, 'condition_count': {'orderBy': 82, 'groupBy': 166, 'having': 51, 'nested': 48, 'join': 293}}, 'nursing': {'avg_tokens': 79.61953727506426, 'num_tables': 35, 'num_columns': 180, 'sql_count': 389, 'condition_count': {'orderBy': 30, 'groupBy': 57, 'having': 18, 'nested': 53, 'join': 363}}, 'philosophy': {'avg_tokens': 114.70414201183432, 'num_tables': 12, 'num_columns': 51, 'sql_count': 169, 'condition_count': {'orderBy': 0, 'groupBy': 14, 'having': 14, 'nested': 22, 'join': 166}}, 'education': {'avg_tokens': 66.6, 'num_tables': 26, 'num_columns': 68, 'sql_count': 185, 'condition_count': {'orderBy': 18, 'groupBy': 144, 'having': 89, 'nested': 72, 'join': 178}}, 'event_management': {'avg_tokens': 89.98163265306123, 'num_tables': 44, 'num_columns': 235, 'sql_count': 490, 'condition_count': {'orderBy': 187, 'groupBy': 345, 'having': 34, 'nested': 41, 'join': 434}}, 'personal_training': {'avg_tokens': 89.56218905472637, 'num_tables': 73, 'num_columns': 339, 'sql_count': 402, 'condition_count': {'orderBy': 46, 'groupBy': 179, 'having': 95, 'nested': 20, 'join': 357}}, 'finance': {'avg_tokens': 26.474537037037038, 'num_tables': 34, 'num_columns': 306, 'sql_count': 432, 'condition_count': {'orderBy': 7, 'groupBy': 25, 'having': 2, 'nested': 20, 'join': 31}}, 'painting': {'avg_tokens': 98.07296137339056, 'num_tables': 23, 'num_columns': 133, 'sql_count': 466, 'condition_count': {'orderBy': 5, 'groupBy': 9, 'having': 5, 'nested': 110, 'join': 461}}, 'comedy_films': {'avg_tokens': 103.91037735849056, 'num_tables': 22, 'num_columns': 38, 'sql_count': 212, 'condition_count': {'orderBy': 5, 'groupBy': 180, 'having': 164, 'nested': 209, 'join': 185}}, 'food_and_drink_books': {'avg_tokens': 79.81553398058253, 'num_tables': 23, 'num_columns': 33, 'sql_count': 103, 'condition_count': {'orderBy': 2, 'groupBy': 22, 'having': 17, 'nested': 16, 'join': 101}}, 'western_films': {'avg_tokens': 84.76744186046511, 'num_tables': 34, 'num_columns': 100, 'sql_count': 129, 'condition_count': {'orderBy': 60, 'groupBy': 56, 'having': 14, 'nested': 12, 'join': 
113}}, 'forensic_psychology': {'avg_tokens': 76.35258358662614, 'num_tables': 49, 'num_columns': 255, 'sql_count': 329, 'condition_count': {'orderBy': 16, 'groupBy': 61, 'having': 10, 'nested': 71, 'join': 263}}, 'shipping_and_logistics': {'avg_tokens': 59.16417910447761, 'num_tables': 19, 'num_columns': 58, 'sql_count': 134, 'condition_count': {'orderBy': 11, 'groupBy': 60, 'having': 13, 'nested': 71, 'join': 86}}, 'science_fiction_films': {'avg_tokens': 68.64588528678304, 'num_tables': 26, 'num_columns': 56, 'sql_count': 401, 'condition_count': {'orderBy': 100, 'groupBy': 245, 'having': 124, 'nested': 139, 'join': 357}}, 'mobile_app_development': {'avg_tokens': 41.57575757575758, 'num_tables': 18, 'num_columns': 133, 'sql_count': 528, 'condition_count': {'orderBy': 15, 'groupBy': 17, 'having': 4, 'nested': 80, 'join': 311}}, 'travel': {'avg_tokens': 67.49659863945578, 'num_tables': 42, 'num_columns': 230, 'sql_count': 147, 'condition_count': {'orderBy': 46, 'groupBy': 55, 'having': 3, 'nested': 3, 'join': 134}}, 'business_intelligence': {'avg_tokens': 62.399253731343286, 'num_tables': 30, 'num_columns': 253, 'sql_count': 536, 'condition_count': {'orderBy': 14, 'groupBy': 130, 'having': 66, 'nested': 37, 'join': 464}}, 'accounting': {'avg_tokens': 42.649289099526065, 'num_tables': 41, 'num_columns': 246, 'sql_count': 422, 'condition_count': {'orderBy': 144, 'groupBy': 136, 'having': 14, 'nested': 19, 'join': 237}}, 'fictional_art': {'avg_tokens': 45.570754716981135, 'num_tables': 27, 'num_columns': 182, 'sql_count': 212, 'condition_count': {'orderBy': 14, 'groupBy': 45, 'having': 18, 'nested': 40, 'join': 147}}, 'user_experience_(ux)_design': {'avg_tokens': 44.04448742746615, 'num_tables': 26, 'num_columns': 176, 'sql_count': 517, 'condition_count': {'orderBy': 4, 'groupBy': 159, 'having': 19, 'nested': 130, 'join': 240}}, 'health_and_wellness_programs': {'avg_tokens': 92.60196078431373, 'num_tables': 35, 'num_columns': 153, 'sql_count': 510, 'condition_count': {'orderBy': 198, 'groupBy': 336, 'having': 108, 'nested': 35, 'join': 448}}, 'industrial_design': {'avg_tokens': 31.670212765957448, 'num_tables': 22, 'num_columns': 119, 'sql_count': 94, 'condition_count': {'orderBy': 14, 'groupBy': 22, 'having': 4, 'nested': 26, 'join': 34}}, 'network_security': {'avg_tokens': 79.57603686635944, 'num_tables': 18, 'num_columns': 169, 'sql_count': 217, 'condition_count': {'orderBy': 79, 'groupBy': 1, 'having': 1, 'nested': 0, 'join': 189}}, 'user_interface_design': {'avg_tokens': 55.40972222222222, 'num_tables': 21, 'num_columns': 137, 'sql_count': 576, 'condition_count': {'orderBy': 39, 'groupBy': 187, 'having': 63, 'nested': 167, 'join': 228}}, 'cooking_shows': {'avg_tokens': 48.329787234042556, 'num_tables': 24, 'num_columns': 49, 'sql_count': 188, 'condition_count': {'orderBy': 5, 'groupBy': 90, 'having': 72, 'nested': 35, 'join': 145}}, 'hypnotherapy': {'avg_tokens': 72.82211538461539, 'num_tables': 26, 'num_columns': 166, 'sql_count': 416, 'condition_count': {'orderBy': 163, 'groupBy': 227, 'having': 41, 'nested': 34, 'join': 349}}, 'operations_management': {'avg_tokens': 63.74, 'num_tables': 51, 'num_columns': 329, 'sql_count': 550, 'condition_count': {'orderBy': 119, 'groupBy': 207, 'having': 57, 'nested': 27, 'join': 505}}, 'cybersecurity': {'avg_tokens': 65.7202380952381, 'num_tables': 51, 'num_columns': 260, 'sql_count': 336, 'condition_count': {'orderBy': 35, 'groupBy': 25, 'having': 14, 'nested': 4, 'join': 277}}, 'cooking': {'avg_tokens': 62.67479674796748, 'num_tables': 16, 
'num_columns': 53, 'sql_count': 123, 'condition_count': {'orderBy': 11, 'groupBy': 80, 'having': 63, 'nested': 32, 'join': 118}}, 'homeopathy': {'avg_tokens': 62.77325581395349, 'num_tables': 10, 'num_columns': 43, 'sql_count': 172, 'condition_count': {'orderBy': 0, 'groupBy': 1, 'having': 0, 'nested': 13, 'join': 105}}, 'civil_engineering': {'avg_tokens': 71.6875, 'num_tables': 44, 'num_columns': 233, 'sql_count': 560, 'condition_count': {'orderBy': 16, 'groupBy': 103, 'having': 35, 'nested': 54, 'join': 509}}, 'game_shows': {'avg_tokens': 111.21985815602837, 'num_tables': 22, 'num_columns': 75, 'sql_count': 141, 'condition_count': {'orderBy': 2, 'groupBy': 131, 'having': 113, 'nested': 74, 'join': 140}}, 'jazz': {'avg_tokens': 80.78853046594982, 'num_tables': 28, 'num_columns': 49, 'sql_count': 279, 'condition_count': {'orderBy': 0, 'groupBy': 95, 'having': 95, 'nested': 277, 'join': 0}}, 'sculpting': {'avg_tokens': 94.89001692047377, 'num_tables': 28, 'num_columns': 63, 'sql_count': 591, 'condition_count': {'orderBy': 10, 'groupBy': 69, 'having': 52, 'nested': 146, 'join': 584}}, 'computer_engineering': {'avg_tokens': 57.48913043478261, 'num_tables': 22, 'num_columns': 59, 'sql_count': 92, 'condition_count': {'orderBy': 7, 'groupBy': 64, 'having': 52, 'nested': 61, 'join': 62}}, 'email_marketing': {'avg_tokens': 79.49731182795699, 'num_tables': 19, 'num_columns': 116, 'sql_count': 372, 'condition_count': {'orderBy': 61, 'groupBy': 116, 'having': 6, 'nested': 90, 'join': 261}}, 'retail': {'avg_tokens': 59.81981981981982, 'num_tables': 28, 'num_columns': 57, 'sql_count': 111, 'condition_count': {'orderBy': 12, 'groupBy': 93, 'having': 47, 'nested': 24, 'join': 103}}, 'triathlon': {'avg_tokens': 54.492063492063494, 'num_tables': 10, 'num_columns': 42, 'sql_count': 63, 'condition_count': {'orderBy': 26, 'groupBy': 45, 'having': 11, 'nested': 3, 'join': 59}}, 'industrial_engineering': {'avg_tokens': 70.21212121212122, 'num_tables': 18, 'num_columns': 65, 'sql_count': 66, 'condition_count': {'orderBy': 44, 'groupBy': 43, 'having': 33, 'nested': 22, 'join': 48}}, 'food_and_drink': {'avg_tokens': 50.36065573770492, 'num_tables': 37, 'num_columns': 221, 'sql_count': 488, 'condition_count': {'orderBy': 127, 'groupBy': 77, 'having': 9, 'nested': 216, 'join': 212}}, 'cognitive_psychology': {'avg_tokens': 88.24096385542168, 'num_tables': 20, 'num_columns': 53, 'sql_count': 83, 'condition_count': {'orderBy': 29, 'groupBy': 82, 'having': 38, 'nested': 4, 'join': 82}}, 'smart_transportation': {'avg_tokens': 43.25833333333333, 'num_tables': 15, 'num_columns': 43, 'sql_count': 120, 'condition_count': {'orderBy': 23, 'groupBy': 90, 'having': 12, 'nested': 7, 'join': 110}}, 'environment': {'avg_tokens': 75.4055944055944, 'num_tables': 42, 'num_columns': 231, 'sql_count': 286, 'condition_count': {'orderBy': 178, 'groupBy': 45, 'having': 0, 'nested': 0, 'join': 279}}, 'technology': {'avg_tokens': 70.96703296703296, 'num_tables': 15, 'num_columns': 64, 'sql_count': 273, 'condition_count': {'orderBy': 54, 'groupBy': 184, 'having': 104, 'nested': 100, 'join': 259}}, 'home_improvement': {'avg_tokens': 68.68316831683168, 'num_tables': 17, 'num_columns': 60, 'sql_count': 101, 'condition_count': {'orderBy': 2, 'groupBy': 23, 'having': 21, 'nested': 77, 'join': 94}}, 'war_films': {'avg_tokens': 109.1025, 'num_tables': 24, 'num_columns': 121, 'sql_count': 400, 'condition_count': {'orderBy': 93, 'groupBy': 131, 'having': 53, 'nested': 8, 'join': 397}}, 'bike_sharing': {'avg_tokens': 46.177570093457945, 'num_tables': 
11, 'num_columns': 101, 'sql_count': 214, 'condition_count': {'orderBy': 48, 'groupBy': 99, 'having': 28, 'nested': 68, 'join': 41}}, 'blockchain_technology': {'avg_tokens': 72.26627218934911, 'num_tables': 36, 'num_columns': 172, 'sql_count': 507, 'condition_count': {'orderBy': 305, 'groupBy': 383, 'having': 96, 'nested': 58, 'join': 460}}, 'martial_arts': {'avg_tokens': 33.43312101910828, 'num_tables': 16, 'num_columns': 65, 'sql_count': 157, 'condition_count': {'orderBy': 27, 'groupBy': 38, 'having': 1, 'nested': 10, 'join': 55}}, 'dance': {'avg_tokens': 50.10958904109589, 'num_tables': 42, 'num_columns': 214, 'sql_count': 73, 'condition_count': {'orderBy': 22, 'groupBy': 38, 'having': 5, 'nested': 10, 'join': 52}}, 'search_engine_optimization_(seo)': {'avg_tokens': 42.441295546558706, 'num_tables': 27, 'num_columns': 192, 'sql_count': 494, 'condition_count': {'orderBy': 75, 'groupBy': 17, 'having': 1, 'nested': 78, 'join': 223}}, 'marketing_books': {'avg_tokens': 80.30412371134021, 'num_tables': 22, 'num_columns': 128, 'sql_count': 388, 'condition_count': {'orderBy': 59, 'groupBy': 283, 'having': 113, 'nested': 246, 'join': 350}}, 'risk_management': {'avg_tokens': 51.08510638297872, 'num_tables': 17, 'num_columns': 68, 'sql_count': 47, 'condition_count': {'orderBy': 12, 'groupBy': 30, 'having': 0, 'nested': 0, 'join': 43}}, 'journaling': {'avg_tokens': 69.67721518987342, 'num_tables': 22, 'num_columns': 42, 'sql_count': 158, 'condition_count': {'orderBy': 10, 'groupBy': 82, 'having': 67, 'nested': 37, 'join': 151}}, 'web_development': {'avg_tokens': 59.66019417475728, 'num_tables': 28, 'num_columns': 56, 'sql_count': 103, 'condition_count': {'orderBy': 9, 'groupBy': 61, 'having': 37, 'nested': 27, 'join': 100}}, 'intelligent_public_safety': {'avg_tokens': 92.51626016260163, 'num_tables': 45, 'num_columns': 224, 'sql_count': 246, 'condition_count': {'orderBy': 11, 'groupBy': 110, 'having': 4, 'nested': 12, 'join': 219}}, 'meditation': {'avg_tokens': 72.8173076923077, 'num_tables': 17, 'num_columns': 37, 'sql_count': 104, 'condition_count': {'orderBy': 15, 'groupBy': 91, 'having': 25, 'nested': 5, 'join': 101}}, 'photography': {'avg_tokens': 51.037837837837834, 'num_tables': 29, 'num_columns': 181, 'sql_count': 185, 'condition_count': {'orderBy': 34, 'groupBy': 80, 'having': 32, 'nested': 16, 'join': 154}}, 'web_design': {'avg_tokens': 28.166666666666668, 'num_tables': 4, 'num_columns': 12, 'sql_count': 18, 'condition_count': {'orderBy': 1, 'groupBy': 4, 'having': 0, 'nested': 0, 'join': 8}}, 'dance_therapy': {'avg_tokens': 83.78846153846153, 'num_tables': 11, 'num_columns': 37, 'sql_count': 52, 'condition_count': {'orderBy': 0, 'groupBy': 12, 'having': 10, 'nested': 7, 'join': 51}}, 'astronomy': {'avg_tokens': 40.705078125, 'num_tables': 20, 'num_columns': 324, 'sql_count': 512, 'condition_count': {'orderBy': 14, 'groupBy': 6, 'having': 3, 'nested': 274, 'join': 47}}, 'computer_hardware': {'avg_tokens': 39.079268292682926, 'num_tables': 15, 'num_columns': 35, 'sql_count': 164, 'condition_count': {'orderBy': 28, 'groupBy': 87, 'having': 22, 'nested': 23, 'join': 126}}, 'human_resources_management': {'avg_tokens': 91.70652173913044, 'num_tables': 17, 'num_columns': 77, 'sql_count': 368, 'condition_count': {'orderBy': 288, 'groupBy': 301, 'having': 15, 'nested': 29, 'join': 339}}, 'surgery': {'avg_tokens': 65.23489932885906, 'num_tables': 16, 'num_columns': 184, 'sql_count': 149, 'condition_count': {'orderBy': 79, 'groupBy': 68, 'having': 14, 'nested': 20, 'join': 104}}, 'medicine': 
{'avg_tokens': 53.857142857142854, 'num_tables': 28, 'num_columns': 105, 'sql_count': 14, 'condition_count': {'orderBy': 1, 'groupBy': 3, 'having': 0, 'nested': 0, 'join': 11}}, 'writing_therapy': {'avg_tokens': 95.56831683168316, 'num_tables': 52, 'num_columns': 190, 'sql_count': 505, 'condition_count': {'orderBy': 39, 'groupBy': 233, 'having': 136, 'nested': 42, 'join': 486}}, 'data_analysis': {'avg_tokens': 69.18529411764706, 'num_tables': 37, 'num_columns': 230, 'sql_count': 340, 'condition_count': {'orderBy': 88, 'groupBy': 245, 'having': 111, 'nested': 64, 'join': 277}}, 'interior_decorating': {'avg_tokens': 50.95652173913044, 'num_tables': 13, 'num_columns': 97, 'sql_count': 230, 'condition_count': {'orderBy': 34, 'groupBy': 83, 'having': 31, 'nested': 12, 'join': 199}}, 'internet_of_things_(iot)': {'avg_tokens': 76.71458333333334, 'num_tables': 22, 'num_columns': 119, 'sql_count': 480, 'condition_count': {'orderBy': 175, 'groupBy': 164, 'having': 9, 'nested': 19, 'join': 429}}, 'movies': {'avg_tokens': 72.78048780487805, 'num_tables': 12, 'num_columns': 34, 'sql_count': 82, 'condition_count': {'orderBy': 8, 'groupBy': 41, 'having': 15, 'nested': 49, 'join': 55}}, 'economics': {'avg_tokens': 50.16858237547893, 'num_tables': 34, 'num_columns': 189, 'sql_count': 261, 'condition_count': {'orderBy': 19, 'groupBy': 51, 'having': 14, 'nested': 15, 'join': 211}}, 'basketball': {'avg_tokens': 78.6355748373102, 'num_tables': 32, 'num_columns': 367, 'sql_count': 461, 'condition_count': {'orderBy': 132, 'groupBy': 271, 'having': 192, 'nested': 91, 'join': 409}}, 'action_films': {'avg_tokens': 89.02995391705069, 'num_tables': 28, 'num_columns': 88, 'sql_count': 434, 'condition_count': {'orderBy': 104, 'groupBy': 225, 'having': 135, 'nested': 218, 'join': 396}}, 'illustration': {'avg_tokens': 78.50737463126843, 'num_tables': 36, 'num_columns': 138, 'sql_count': 339, 'condition_count': {'orderBy': 40, 'groupBy': 112, 'having': 43, 'nested': 52, 'join': 289}}, 'ecology': {'avg_tokens': 120.67168674698796, 'num_tables': 24, 'num_columns': 110, 'sql_count': 332, 'condition_count': {'orderBy': 5, 'groupBy': 96, 'having': 80, 'nested': 32, 'join': 317}}, 'virtual_event_planning': {'avg_tokens': 36.78072289156626, 'num_tables': 23, 'num_columns': 198, 'sql_count': 415, 'condition_count': {'orderBy': 9, 'groupBy': 27, 'having': 4, 'nested': 43, 'join': 60}}, 'acupuncture': {'avg_tokens': 87.00806451612904, 'num_tables': 11, 'num_columns': 54, 'sql_count': 372, 'condition_count': {'orderBy': 1, 'groupBy': 68, 'having': 65, 'nested': 43, 'join': 370}}, 'glass_etching': {'avg_tokens': 67.32491582491582, 'num_tables': 28, 'num_columns': 175, 'sql_count': 594, 'condition_count': {'orderBy': 254, 'groupBy': 304, 'having': 183, 'nested': 53, 'join': 383}}, 'cloud_computing': {'avg_tokens': 49.02127659574468, 'num_tables': 23, 'num_columns': 66, 'sql_count': 188, 'condition_count': {'orderBy': 46, 'groupBy': 109, 'having': 38, 'nested': 24, 'join': 182}}, 'business_strategy': {'avg_tokens': 81.68662674650699, 'num_tables': 45, 'num_columns': 194, 'sql_count': 501, 'condition_count': {'orderBy': 325, 'groupBy': 396, 'having': 155, 'nested': 36, 'join': 476}}, 'history': {'avg_tokens': 32.0, 'num_tables': 23, 'num_columns': 69, 'sql_count': 102, 'condition_count': {'orderBy': 35, 'groupBy': 0, 'having': 0, 'nested': 1, 'join': 51}}, 'epidemiology': {'avg_tokens': 71.21212121212122, 'num_tables': 17, 'num_columns': 71, 'sql_count': 132, 'condition_count': {'orderBy': 14, 'groupBy': 59, 'having': 18, 'nested': 16, 
'join': 126}}, 'snowboarding': {'avg_tokens': 70.97410358565737, 'num_tables': 50, 'num_columns': 270, 'sql_count': 502, 'condition_count': {'orderBy': 65, 'groupBy': 213, 'having': 10, 'nested': 111, 'join': 298}}, 'content_marketing': {'avg_tokens': 38.111111111111114, 'num_tables': 26, 'num_columns': 155, 'sql_count': 54, 'condition_count': {'orderBy': 12, 'groupBy': 22, 'having': 6, 'nested': 3, 'join': 24}}, 'music_therapy': {'avg_tokens': 33.476190476190474, 'num_tables': 70, 'num_columns': 355, 'sql_count': 84, 'condition_count': {'orderBy': 17, 'groupBy': 20, 'having': 1, 'nested': 18, 'join': 35}}, 'intelligent_transportation_systems': {'avg_tokens': 74.82116788321167, 'num_tables': 26, 'num_columns': 146, 'sql_count': 274, 'condition_count': {'orderBy': 24, 'groupBy': 57, 'having': 6, 'nested': 30, 'join': 188}}, 'oceanography': {'avg_tokens': 25.286516853932586, 'num_tables': 42, 'num_columns': 96, 'sql_count': 178, 'condition_count': {'orderBy': 0, 'groupBy': 0, 'having': 0, 'nested': 30, 'join': 0}}, 'public_health': {'avg_tokens': 86.76158940397352, 'num_tables': 49, 'num_columns': 247, 'sql_count': 302, 'condition_count': {'orderBy': 62, 'groupBy': 114, 'having': 80, 'nested': 20, 'join': 232}}, 'forensic_science': {'avg_tokens': 64.23880597014926, 'num_tables': 18, 'num_columns': 70, 'sql_count': 134, 'condition_count': {'orderBy': 0, 'groupBy': 24, 'having': 23, 'nested': 0, 'join': 133}}, 'sports_officiating': {'avg_tokens': 80.38783269961978, 'num_tables': 42, 'num_columns': 131, 'sql_count': 263, 'condition_count': {'orderBy': 121, 'groupBy': 206, 'having': 23, 'nested': 2, 'join': 260}}, 'counseling': {'avg_tokens': 91.67955801104972, 'num_tables': 24, 'num_columns': 160, 'sql_count': 362, 'condition_count': {'orderBy': 108, 'groupBy': 240, 'having': 65, 'nested': 28, 'join': 335}}, 'smart_lighting': {'avg_tokens': 113.11538461538461, 'num_tables': 31, 'num_columns': 161, 'sql_count': 156, 'condition_count': {'orderBy': 98, 'groupBy': 77, 'having': 12, 'nested': 47, 'join': 139}}, 'yoga_therapy': {'avg_tokens': 49.282051282051285, 'num_tables': 19, 'num_columns': 38, 'sql_count': 78, 'condition_count': {'orderBy': 5, 'groupBy': 28, 'having': 15, 'nested': 8, 'join': 70}}, 'pharmacy': {'avg_tokens': 68.40387722132472, 'num_tables': 39, 'num_columns': 241, 'sql_count': 619, 'condition_count': {'orderBy': 241, 'groupBy': 286, 'having': 57, 'nested': 204, 'join': 372}}, 'advertising': {'avg_tokens': 83.16448598130842, 'num_tables': 33, 'num_columns': 170, 'sql_count': 535, 'condition_count': {'orderBy': 17, 'groupBy': 155, 'having': 3, 'nested': 12, 'join': 479}}, 'graphic_novels': {'avg_tokens': 88.40163934426229, 'num_tables': 40, 'num_columns': 133, 'sql_count': 488, 'condition_count': {'orderBy': 15, 'groupBy': 98, 'having': 76, 'nested': 140, 'join': 416}}, 'romance_films': {'avg_tokens': 66.01269035532995, 'num_tables': 26, 'num_columns': 67, 'sql_count': 394, 'condition_count': {'orderBy': 42, 'groupBy': 3, 'having': 1, 'nested': 6, 'join': 383}}, 'stand-up_comedy': {'avg_tokens': 78.30058939096267, 'num_tables': 38, 'num_columns': 206, 'sql_count': 509, 'condition_count': {'orderBy': 16, 'groupBy': 131, 'having': 32, 'nested': 45, 'join': 464}}, 'geology': {'avg_tokens': 66.46268656716418, 'num_tables': 19, 'num_columns': 83, 'sql_count': 134, 'condition_count': {'orderBy': 0, 'groupBy': 16, 'having': 1, 'nested': 6, 'join': 127}}, 'business_development': {'avg_tokens': 39.723214285714285, 'num_tables': 14, 'num_columns': 44, 'sql_count': 112, 'condition_count': 
{'orderBy': 29, 'groupBy': 36, 'having': 10, 'nested': 38, 'join': 62}}, 'comic_books': {'avg_tokens': 103.51351351351352, 'num_tables': 45, 'num_columns': 107, 'sql_count': 407, 'condition_count': {'orderBy': 152, 'groupBy': 174, 'having': 44, 'nested': 102, 'join': 400}}, 'drama_films': {'avg_tokens': 57.57377049180328, 'num_tables': 15, 'num_columns': 37, 'sql_count': 122, 'condition_count': {'orderBy': 32, 'groupBy': 83, 'having': 39, 'nested': 11, 'join': 118}}, 'information_security': {'avg_tokens': 68.56862745098039, 'num_tables': 21, 'num_columns': 47, 'sql_count': 51, 'condition_count': {'orderBy': 7, 'groupBy': 21, 'having': 9, 'nested': 15, 'join': 47}}, 'gardening': {'avg_tokens': 44.51520912547529, 'num_tables': 23, 'num_columns': 219, 'sql_count': 526, 'condition_count': {'orderBy': 10, 'groupBy': 18, 'having': 4, 'nested': 27, 'join': 211}}, 'virtual_reality': {'avg_tokens': 122.95846645367412, 'num_tables': 31, 'num_columns': 84, 'sql_count': 313, 'condition_count': {'orderBy': 68, 'groupBy': 70, 'having': 12, 'nested': 141, 'join': 246}}, 'craft_beer_brewing': {'avg_tokens': 68.23140495867769, 'num_tables': 23, 'num_columns': 55, 'sql_count': 121, 'condition_count': {'orderBy': 6, 'groupBy': 65, 'having': 24, 'nested': 57, 'join': 100}}, 'ui_ux_design': {'avg_tokens': 58.99175824175824, 'num_tables': 21, 'num_columns': 145, 'sql_count': 364, 'condition_count': {'orderBy': 42, 'groupBy': 127, 'having': 73, 'nested': 190, 'join': 296}}, 'botany': {'avg_tokens': 99.32, 'num_tables': 56, 'num_columns': 241, 'sql_count': 500, 'condition_count': {'orderBy': 11, 'groupBy': 19, 'having': 5, 'nested': 21, 'join': 390}}, 'screenwriting': {'avg_tokens': 31.367346938775512, 'num_tables': 13, 'num_columns': 36, 'sql_count': 98, 'condition_count': {'orderBy': 9, 'groupBy': 25, 'having': 14, 'nested': 22, 'join': 42}}, 'sports_medicine': {'avg_tokens': 57.9236641221374, 'num_tables': 33, 'num_columns': 200, 'sql_count': 131, 'condition_count': {'orderBy': 41, 'groupBy': 91, 'having': 20, 'nested': 13, 'join': 111}}, 'big_data': {'avg_tokens': 106.31428571428572, 'num_tables': 16, 'num_columns': 121, 'sql_count': 385, 'condition_count': {'orderBy': 12, 'groupBy': 220, 'having': 100, 'nested': 244, 'join': 344}}, 'musical_theater': {'avg_tokens': 75.50462107208872, 'num_tables': 39, 'num_columns': 239, 'sql_count': 541, 'condition_count': {'orderBy': 398, 'groupBy': 442, 'having': 70, 'nested': 28, 'join': 498}}, 'archaeology': {'avg_tokens': 58.99090909090909, 'num_tables': 28, 'num_columns': 133, 'sql_count': 110, 'condition_count': {'orderBy': 1, 'groupBy': 84, 'having': 3, 'nested': 0, 'join': 91}}, 'pilates': {'avg_tokens': 70.96041666666666, 'num_tables': 40, 'num_columns': 239, 'sql_count': 480, 'condition_count': {'orderBy': 149, 'groupBy': 334, 'having': 128, 'nested': 89, 'join': 413}}, 'data_mining': {'avg_tokens': 39.613995485327315, 'num_tables': 38, 'num_columns': 206, 'sql_count': 443, 'condition_count': {'orderBy': 52, 'groupBy': 177, 'having': 38, 'nested': 87, 'join': 223}}, 'marine_biology': {'avg_tokens': 43.89165186500888, 'num_tables': 30, 'num_columns': 216, 'sql_count': 563, 'condition_count': {'orderBy': 26, 'groupBy': 124, 'having': 3, 'nested': 14, 'join': 219}}, 'cosmetics_making': {'avg_tokens': 60.473794549266245, 'num_tables': 26, 'num_columns': 180, 'sql_count': 477, 'condition_count': {'orderBy': 129, 'groupBy': 242, 'having': 100, 'nested': 107, 'join': 412}}, 'mobile_app_design': {'avg_tokens': 80.5, 'num_tables': 18, 'num_columns': 34, 'sql_count': 108, 
'condition_count': {'orderBy': 5, 'groupBy': 66, 'having': 50, 'nested': 53, 'join': 102}}, 'game_design': {'avg_tokens': 71.42736842105263, 'num_tables': 24, 'num_columns': 88, 'sql_count': 475, 'condition_count': {'orderBy': 7, 'groupBy': 57, 'having': 51, 'nested': 4, 'join': 355}}, 'high-speed_trains': {'avg_tokens': 47.72115384615385, 'num_tables': 29, 'num_columns': 281, 'sql_count': 104, 'condition_count': {'orderBy': 14, 'groupBy': 23, 'having': 4, 'nested': 9, 'join': 33}}, 'cardiovascular_training': {'avg_tokens': 31.133333333333333, 'num_tables': 15, 'num_columns': 38, 'sql_count': 75, 'condition_count': {'orderBy': 0, 'groupBy': 0, 'having': 0, 'nested': 29, 'join': 15}}, 'documentary_films': {'avg_tokens': 86.6829268292683, 'num_tables': 43, 'num_columns': 182, 'sql_count': 492, 'condition_count': {'orderBy': 9, 'groupBy': 15, 'having': 3, 'nested': 5, 'join': 403}}, 'business': {'avg_tokens': 81.501953125, 'num_tables': 36, 'num_columns': 226, 'sql_count': 512, 'condition_count': {'orderBy': 375, 'groupBy': 396, 'having': 95, 'nested': 32, 'join': 443}}, 'entomology': {'avg_tokens': 93.86190476190477, 'num_tables': 23, 'num_columns': 131, 'sql_count': 210, 'condition_count': {'orderBy': 62, 'groupBy': 74, 'having': 19, 'nested': 2, 'join': 192}}, 'data_analytics': {'avg_tokens': 70.15189873417721, 'num_tables': 27, 'num_columns': 62, 'sql_count': 237, 'condition_count': {'orderBy': 28, 'groupBy': 194, 'having': 68, 'nested': 8, 'join': 237}}, 'agriculture': {'avg_tokens': 59.526881720430104, 'num_tables': 13, 'num_columns': 67, 'sql_count': 186, 'condition_count': {'orderBy': 30, 'groupBy': 110, 'having': 55, 'nested': 73, 'join': 131}}, 'soapstone_carving': {'avg_tokens': 70.12338858195211, 'num_tables': 26, 'num_columns': 153, 'sql_count': 543, 'condition_count': {'orderBy': 326, 'groupBy': 405, 'having': 67, 'nested': 41, 'join': 443}}, 'boxing': {'avg_tokens': 83.93333333333334, 'num_tables': 17, 'num_columns': 71, 'sql_count': 120, 'condition_count': {'orderBy': 1, 'groupBy': 91, 'having': 35, 'nested': 31, 'join': 118}}, 'leadership': {'avg_tokens': 78.41233766233766, 'num_tables': 42, 'num_columns': 232, 'sql_count': 308, 'condition_count': {'orderBy': 78, 'groupBy': 100, 'having': 8, 'nested': 18, 'join': 249}}, 'sustainability': {'avg_tokens': 95.42560553633218, 'num_tables': 27, 'num_columns': 144, 'sql_count': 289, 'condition_count': {'orderBy': 210, 'groupBy': 234, 'having': 63, 'nested': 55, 'join': 272}}, 'athletic_training': {'avg_tokens': 94.0730593607306, 'num_tables': 47, 'num_columns': 300, 'sql_count': 438, 'condition_count': {'orderBy': 68, 'groupBy': 298, 'having': 183, 'nested': 194, 'join': 388}}, 'interior_design': {'avg_tokens': 56.31404958677686, 'num_tables': 22, 'num_columns': 54, 'sql_count': 121, 'condition_count': {'orderBy': 78, 'groupBy': 82, 'having': 60, 'nested': 28, 'join': 105}}, 'talk_shows': {'avg_tokens': 81.28070175438596, 'num_tables': 12, 'num_columns': 51, 'sql_count': 171, 'condition_count': {'orderBy': 68, 'groupBy': 111, 'having': 66, 'nested': 88, 'join': 166}}, 'volleyball': {'avg_tokens': 140.7431693989071, 'num_tables': 23, 'num_columns': 61, 'sql_count': 549, 'condition_count': {'orderBy': 2, 'groupBy': 527, 'having': 518, 'nested': 24, 'join': 547}}, 'makeup_artistry': {'avg_tokens': 63.06060606060606, 'num_tables': 36, 'num_columns': 236, 'sql_count': 198, 'condition_count': {'orderBy': 86, 'groupBy': 135, 'having': 28, 'nested': 24, 'join': 162}}, 'coaching': {'avg_tokens': 40.443697478991595, 'num_tables': 24, 
'num_columns': 159, 'sql_count': 595, 'condition_count': {'orderBy': 43, 'groupBy': 83, 'having': 17, 'nested': 34, 'join': 277}}, 'tv_shows': {'avg_tokens': 98.33707865168539, 'num_tables': 63, 'num_columns': 245, 'sql_count': 356, 'condition_count': {'orderBy': 10, 'groupBy': 144, 'having': 133, 'nested': 206, 'join': 301}}, 'augmented_reality': {'avg_tokens': 45.3421052631579, 'num_tables': 46, 'num_columns': 238, 'sql_count': 114, 'condition_count': {'orderBy': 32, 'groupBy': 64, 'having': 6, 'nested': 15, 'join': 56}}, 'anthropology': {'avg_tokens': 66.04255319148936, 'num_tables': 19, 'num_columns': 62, 'sql_count': 141, 'condition_count': {'orderBy': 15, 'groupBy': 43, 'having': 21, 'nested': 0, 'join': 127}}, 'business_books': {'avg_tokens': 84.71030042918454, 'num_tables': 35, 'num_columns': 166, 'sql_count': 466, 'condition_count': {'orderBy': 295, 'groupBy': 365, 'having': 25, 'nested': 25, 'join': 405}}, 'horror_films': {'avg_tokens': 97.93159609120521, 'num_tables': 56, 'num_columns': 201, 'sql_count': 307, 'condition_count': {'orderBy': 240, 'groupBy': 207, 'having': 59, 'nested': 12, 'join': 234}}, 'electronic_music': {'avg_tokens': 77.93483709273183, 'num_tables': 33, 'num_columns': 72, 'sql_count': 399, 'condition_count': {'orderBy': 112, 'groupBy': 322, 'having': 233, 'nested': 97, 'join': 394}}, 'sociology': {'avg_tokens': 92.0304347826087, 'num_tables': 28, 'num_columns': 76, 'sql_count': 230, 'condition_count': {'orderBy': 4, 'groupBy': 182, 'having': 98, 'nested': 120, 'join': 210}}, 'psychology_books': {'avg_tokens': 86.65876777251185, 'num_tables': 16, 'num_columns': 105, 'sql_count': 422, 'condition_count': {'orderBy': 205, 'groupBy': 252, 'having': 104, 'nested': 110, 'join': 392}}, 'energy_management': {'avg_tokens': 37.725806451612904, 'num_tables': 10, 'num_columns': 42, 'sql_count': 62, 'condition_count': {'orderBy': 22, 'groupBy': 23, 'having': 1, 'nested': 2, 'join': 0}}, 'news_programs': {'avg_tokens': 97.97849462365592, 'num_tables': 32, 'num_columns': 81, 'sql_count': 279, 'condition_count': {'orderBy': 25, 'groupBy': 89, 'having': 57, 'nested': 203, 'join': 246}}, 'country_music': {'avg_tokens': 60.895890410958906, 'num_tables': 31, 'num_columns': 59, 'sql_count': 365, 'condition_count': {'orderBy': 202, 'groupBy': 160, 'having': 67, 'nested': 46, 'join': 351}}, 'chiropractic': {'avg_tokens': 103.82035928143712, 'num_tables': 17, 'num_columns': 60, 'sql_count': 167, 'condition_count': {'orderBy': 0, 'groupBy': 0, 'having': 0, 'nested': 0, 'join': 166}}, 'developmental_psychology': {'avg_tokens': 90.43450479233226, 'num_tables': 22, 'num_columns': 77, 'sql_count': 313, 'condition_count': {'orderBy': 62, 'groupBy': 103, 'having': 19, 'nested': 106, 'join': 300}}, 'political_science': {'avg_tokens': 38.54703832752613, 'num_tables': 29, 'num_columns': 165, 'sql_count': 287, 'condition_count': {'orderBy': 125, 'groupBy': 77, 'having': 17, 'nested': 110, 'join': 71}}, 'sports_psychology': {'avg_tokens': 100.74709976798144, 'num_tables': 26, 'num_columns': 158, 'sql_count': 431, 'condition_count': {'orderBy': 128, 'groupBy': 116, 'having': 8, 'nested': 36, 'join': 360}}, 'tai_chi': {'avg_tokens': 85.71348314606742, 'num_tables': 13, 'num_columns': 57, 'sql_count': 178, 'condition_count': {'orderBy': 1, 'groupBy': 31, 'having': 24, 'nested': 51, 'join': 173}}, 'energy': {'avg_tokens': 50.63522012578616, 'num_tables': 19, 'num_columns': 120, 'sql_count': 477, 'condition_count': {'orderBy': 5, 'groupBy': 133, 'having': 0, 'nested': 30, 'join': 141}}, 
'fitness_instruction': {'avg_tokens': 38.02325581395349, 'num_tables': 30, 'num_columns': 242, 'sql_count': 43, 'condition_count': {'orderBy': 14, 'groupBy': 26, 'having': 2, 'nested': 0, 'join': 31}}, 'corporate_social_responsibility': {'avg_tokens': 76.6875, 'num_tables': 13, 'num_columns': 64, 'sql_count': 192, 'condition_count': {'orderBy': 1, 'groupBy': 131, 'having': 3, 'nested': 12, 'join': 177}}, 'genetics': {'avg_tokens': 64.12253829321664, 'num_tables': 43, 'num_columns': 207, 'sql_count': 457, 'condition_count': {'orderBy': 3, 'groupBy': 8, 'having': 6, 'nested': 11, 'join': 421}}}'''
data_dict = ast.literal_eval(data)
# Initialize a dictionary to hold the max values, sum values, and lists of all values
stat_dict = {
'avg_tokens': {'value': 0, 'category': None, 'sum': 0, 'values': []},
'num_tables': {'value': 0, 'category': None, 'sum': 0, 'values': []},
'num_columns': {'value': 0, 'category': None, 'sum': 0, 'values': []},
'sql_count': {'value': 0, 'category': None, 'sum': 0, 'values': []},
'orderBy': {'value': 0, 'category': None, 'sum': 0, 'values': []},
'groupBy': {'value': 0, 'category': None, 'sum': 0, 'values': []},
'having': {'value': 0, 'category': None, 'sum': 0, 'values': []},
'nested': {'value': 0, 'category': None, 'sum': 0, 'values': []},
'join': {'value': 0, 'category': None, 'sum': 0, 'values': []}
}
output = ''
categories_count = 0
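# Each iteration of the loop below emits one LaTeX table row. For example, the 'homeopathy'
# entry above (avg_tokens 62.77, 10 tables, 43 columns, 172 SQLs, orderBy 0, groupBy 1,
# having 0, nested 13, join 105) would become (illustrative):
#   Homeopathy & 63 & 10 & 43 & 172 & 0 & 1 & 0 & 13 & 105 \\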
for category, stats in data_dict.items():
line = "{} & {} & {} & {} & {} & {} & {} & {} & {} & {} \\\\".format(
category.capitalize(),
round(stats['avg_tokens']),
stats['num_tables'],
stats['num_columns'],
stats['sql_count'],
stats['condition_count']['orderBy'],
stats['condition_count']['groupBy'],
stats['condition_count']['having'],
stats['condition_count']['nested'],
stats['condition_count']['join']
)
output += line + '\n'
categories_count += 1 # Increase the count of categories
# Check if each stat is a new max and if so, update the stat_dict
# Also add each stat to the sum for the averages later, and to the list for the median
for stat in stat_dict.keys():
if stat in ['orderBy', 'groupBy', 'having', 'nested', 'join']:
value = stats['condition_count'][stat]
else:
value = stats[stat]
if value > stat_dict[stat]['value']:
stat_dict[stat]['value'] = value
stat_dict[stat]['category'] = category
stat_dict[stat]['sum'] += value
stat_dict[stat]['values'].append(value)
print(output)
# Now print the max of every element, indexed by category
# And print the average and median of every element
for stat, info in stat_dict.items():
print("The maximum value of {} is {} in the category {}.".format(stat, info['value'], info['category']))
print("The average value of {} is {}.".format(stat, info['sum'] / categories_count))
print("The median value of {} is {}.".format(stat, statistics.median(info['values'])))
| [] |
2024-01-10 | YanJiaHuan/Text2Sql | multi_turn~Bard_GPT~V2~V2.py | import pandas as pd
import time
import re
import openai
import os
from os import environ
import sys
import tiktoken
import sqlite3
from Bard import Chatbot
'''
This is a GPT-Bard contrastive prompting method. In this demo, we still focus on Spider,
but the difference is: the few-shots are generated by Bard.
Version 2:
1. The few-shots are generated by Bard.
2. Bard generates SQL/question samples based on the real input SQL/question (with CoT).
3. GPT knows the few-shots are generated by Bard.
4. Bard knows it is a Spider task.
'''
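# A minimal sketch of the two-stage flow implemented below (illustrative only; the real prompt
# strings, Bard_generation and GPT4_generation are defined later in this file):
#   few_shots, _ = Bard_generation(few_shot_generation_prompt_Bard + schema)              # Bard writes CoT few-shot examples
#   sql, _ = GPT4_generation(SQL_generation_prompt + few_shots + schema + question)       # GPT produces the final SQL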
#################### 0. Prompt ####################
SQL_generation_prompt = '''
You are an expert in SQL. I will give you a natural language question and a database schema,
please help me generate the corresponding SQL query with no further explanation.
'''
few_shot_generation_prompt_Bard = '''
You are an expert in SQL. I will give you a database schema from the Spider dataset, and you need to generate three
SQL queries with natural language questions based on the schema and explain the chain-of-thought logic. Also, I will give you some examples
of how this task is done.
'''
three_shots_SQL_generation_prompt = '''
Here are some examples of EASY, MEDIUM and HARD SQL queries.
SELECT count(*) FROM singer
SELECT avg(weight) , pettype FROM pets GROUP BY pettype
SELECT T1.fname , T1.age FROM student AS T1 JOIN has_pet AS T2 ON T1.stuid = T2.stuid JOIN pets AS T3 ON T3.petid = T2.petid WHERE T3.pettype = 'dog' AND T1.stuid NOT IN (SELECT T1.stuid FROM student AS T1 JOIN has_pet AS T2 ON T1.stuid = T2.stuid JOIN pets AS T3 ON T3.petid = T2.petid WHERE T3.pettype = 'cat')
'''
three_shots_SQL_generation_prompt_from_Bard = '''
I will give you some examples of translating questions to SQL. I will also explain how the SQLs are generated step by step. Please follow the instructions and generate your own answer (SQL).
'''
three_shot_Spider_prompt_without_explain = '''
Here is a sample of text2sql for you to understand the task.
Table advisor, columns = [*,s_ID,i_ID]
Table classroom, columns = [*,building,room_number,capacity]
Table course, columns = [*,course_id,title,dept_name,credits]
Table department, columns = [*,dept_name,building,budget]
Table instructor, columns = [*,ID,name,dept_name,salary]
Table prereq, columns = [*,course_id,prereq_id]
Table section, columns = [*,course_id,sec_id,semester,year,building,room_number,time_slot_id]
Table student, columns = [*,ID,name,dept_name,tot_cred]
Table takes, columns = [*,ID,course_id,sec_id,semester,year,grade]
Table teaches, columns = [*,ID,course_id,sec_id,semester,year]
Table time_slot, columns = [*,time_slot_id,day,start_hr,start_min,end_hr,end_min]
foreign key:[course.dept_name = department.dept_name,instructor.dept_name = department.dept_name,section.building = classroom.building,section.room_number = classroom.room_number,section.course_id = course.course_id,teaches.ID = instructor.ID,teaches.course_id = section.course_id,teaches.sec_id = section.sec_id,teaches.semester = section.semester,teaches.year = section.year,student.dept_name = department.dept_name,takes.ID = student.ID,takes.course_id = section.course_id,takes.sec_id = section.sec_id,takes.semester = section.semester,takes.year = section.year,advisor.s_ID = student.ID,advisor.i_ID = instructor.ID,prereq.prereq_id = course.course_id,prereq.course_id = course.course_id]
primary key:[classroom.building,department.dept_name,course.course_id,instructor.ID,section.course_id,teaches.ID,student.ID,takes.ID,advisor.s_ID,time_slot.time_slot_id,prereq.course_id]
example 1:
Question: Find out the average salary of professors?
Let's think step by step: 'salary of professors' -> 'salary of instructor' -> Go to Table instructor, find the column salary, and calculate the average value.
SELECT avg ( salary ) FROM instructor
example 2:
Question: Find the average salary of the professors of each department?
Let's think step by step: 'salary of professors of each department' -> 'salary of instructor of each department' -> Go to Table instructor, find the column salary, and calculate the average value. 'each department'->group by the department.
SELECT avg ( salary ) , dept_name FROM instructor GROUP BY dept_name
example 3:
Question: Which department has the highest average salary of professors?
Let's think step by step: 'highest average salary of professors' -> 'highest average salary of instructor' -> Go to Table instructor, find the column salary, and calculate the average value. 'highest' -> order by the average value. 'department' -> group by the department.
SELECT dept_name FROM instructor GROUP BY dept_name ORDER BY avg ( salary ) DESC LIMIT 1
'''
checker_prompt = '''
Please help me generate the corresponding SQL query with no further explanation.
'''
#################### 1. Set up ####################
#----------------------------------------------------------------------------------------------------------
# API_KEY = "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" # 自己的
API_KEY = "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" # 买的
# API_KEY = "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" # gpt4 孙哥
os.environ["OPENAI_API_KEY"] = API_KEY
openai.api_key = os.getenv("OPENAI_API_KEY")
# changed
task = 'Spider' # choose 'CoSQL' or 'Spider'
if task == 'CoSQL':
path_to_CoSQL = "./cosql_dataset"
DATASET_SCHEMA = path_to_CoSQL+"/tables.json"
DATASET = path_to_CoSQL+"/sql_state_tracking/cosql_dev.json"
OUTPUT_FILE_1 = "./predicted_sql.txt"
OUTPUT_FILE_2 = "./gold_sql.txt"
DATABASE_PATH = path_to_CoSQL+"/database"
else:
path_to_Spider = "../../Spider"
DATASET_SCHEMA = path_to_Spider + "/tables.json"
DATASET = path_to_Spider + "/dev.json"
OUTPUT_FILE_1 = "./Spider/predicted_sql.txt"
OUTPUT_FILE_2 = "./Spider/gold_sql.txt"
DATABASE_PATH = path_to_Spider + "/database"
# set max tokens limit
MAX_TOKENS = 4096
model_name = "gpt-3.5-turbo"
# model_name = "gpt-4"
encoding = tiktoken.encoding_for_model(model_name)
# count the token
def num_tokens_from_string(string: str, model_name: str) -> int:
"""Returns the number of tokens in a text string."""
encoding = tiktoken.encoding_for_model(model_name)
num_tokens = len(encoding.encode(string))
return num_tokens
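# Illustrative usage (this helper is not called in the main loop below): check that an
# assembled prompt fits the model's context window before sending it.
#   if num_tokens_from_string(message_GPT, model_name) > MAX_TOKENS:
#       print("Prompt too long, skipping this sample")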
# load dataset
def load_data(DATASET):
return pd.read_json(DATASET)
def find_foreign_keys_MYSQL_like(db_name):
df = spider_foreign[spider_foreign['Database name'] == db_name]
output = "["
for index, row in df.iterrows():
output += row['First Table Name'] + '.' + row['First Table Foreign Key'] + " = " + row['Second Table Name'] + '.' + row['Second Table Foreign Key'] + ','
output= output[:-1] + "]"
return output
def find_fields_MYSQL_like(db_name):
df = spider_schema[spider_schema['Database name'] == db_name]
df = df.groupby(' Table Name')
output = ""
for name, group in df:
output += "Table " +name+ ', columns = ['
for index, row in group.iterrows():
output += row[" Field Name"]+','
output = output[:-1]
output += "]\n"
return output
def find_primary_keys_MYSQL_like(db_name):
df = spider_primary[spider_primary['Database name'] == db_name]
output = "["
for index, row in df.iterrows():
output += row['Table Name'] + '.' + row['Primary Key'] +','
output = output[:-1]
output += "]\n"
return output
def creating_schema(DATASET_JSON):
schema_df = pd.read_json(DATASET_JSON)
schema_df = schema_df.drop(['column_names','table_names'], axis=1)
schema = []
f_keys = []
p_keys = []
for index, row in schema_df.iterrows():
tables = row['table_names_original']
col_names = row['column_names_original']
col_types = row['column_types']
foreign_keys = row['foreign_keys']
primary_keys = row['primary_keys']
for col, col_type in zip(col_names, col_types):
index, col_name = col
if index == -1:
for table in tables:
schema.append([row['db_id'], table, '*', 'text'])
else:
schema.append([row['db_id'], tables[index], col_name, col_type])
for primary_key in primary_keys:
index, column = col_names[primary_key]
p_keys.append([row['db_id'], tables[index], column])
for foreign_key in foreign_keys:
first, second = foreign_key
first_index, first_column = col_names[first]
second_index, second_column = col_names[second]
f_keys.append([row['db_id'], tables[first_index], tables[second_index], first_column, second_column])
spider_schema = pd.DataFrame(schema, columns=['Database name', ' Table Name', ' Field Name', ' Type'])
spider_primary = pd.DataFrame(p_keys, columns=['Database name', 'Table Name', 'Primary Key'])
spider_foreign = pd.DataFrame(f_keys,
columns=['Database name', 'First Table Name', 'Second Table Name', 'First Table Foreign Key',
'Second Table Foreign Key'])
return spider_schema,spider_primary,spider_foreign
def SQL_checker(sql, database):
# sql be like: "SELECT * FROM car_1 WHERE car_1.id = 1"
# database is the path to local xxx.sqlite
# the function of this part is to check if the sql is valid, if not, return the error message
path = DATABASE_PATH + '/' + database + '/' + database + '.sqlite'
try:
# Connect to the SQLite database
conn = sqlite3.connect(path)
# Create a cursor object to execute the SQL query
cursor = conn.cursor()
# Execute the SQL query
cursor.execute(sql)
# Commit the transaction and close the connection
conn.commit()
conn.close()
# Return a success message if the SQL query is valid
prompt = "The SQL query is valid in grammar."
checker = False
except sqlite3.Error as e:
# Return the error message if the SQL query is not valid
instruction = f"""#### the sql generated by you: {sql}, has error like :{e} , please fix the error and generate again. \n"""
fields = find_fields_MYSQL_like(database)
fields += "Foreign_keys = " + find_foreign_keys_MYSQL_like(database) + '\n'
fields += "Primary_keys = " + find_primary_keys_MYSQL_like(database)
prompt = instruction + fields + checker_prompt
checker = True
return prompt, checker
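# Illustrative usage of SQL_checker (defined but not wired into the main loop below), assuming
# a Spider database name such as "concert_singer":
#   feedback, has_error = SQL_checker("SELECT count(*) FROM singer", "concert_singer")
#   if has_error:
#       # feedback now holds the schema plus the sqlite error message, ready to resend to the model
#       SQL, _ = GPT4_generation(feedback)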
import time
def GPT4_generation(prompt):
'''
openai.error.RateLimitError: Rate limit reached for default-gpt-3.5-turbo
in organization org-GFmlumrCZBB2Y40fVv7f8qgp on requests per min. Limit: 3 / min.
Please try again in 20s. Contact us through our help center at help.openai.com if you continue to have issues.
Please add a payment method to your account to increase your rate limit.
Visit https://platform.openai.com/account/billing to add a payment method.
'''
limit_marker = False
fake_SQL = "SELECT COUNT(*) FROM singer"
while True:
try:
response = openai.ChatCompletion.create(
model=model_name,
messages=[{"role": "user", "content": prompt}],
n = 1,
stream = False,
temperature=0.0,
max_tokens=600,
top_p = 1.0,
frequency_penalty=0.0,
presence_penalty=0.0,
)
return response['choices'][0]['message']['content'], limit_marker
except openai.error.RateLimitError as e:
print(f"RateLimitError: {e}")
print("Sleeping for 20 seconds...")
time.sleep(20)
print("Retrying...")
except Exception as e:
print(f"Unexpected error: {e}")
return fake_SQL, limit_marker
# helper: extract the SQL from a fenced ```sql ... ``` block in a model response
def extract_sql(response):
matches = re.findall(r'```sql\n(.*?)\n```', response, re.DOTALL)
return matches
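# Illustrative: extract_sql handles responses that wrap SQL in a fenced block, e.g.
#   extract_sql("```sql\nSELECT count(*) FROM singer\n```") -> ['SELECT count(*) FROM singer']
# (not used in the main loop below, which asks the model for plain SQL instead)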
tokens=(
"WwiJN0oLURBx7gX_O8WVz9Fufj1iefdzkpt2fsbsb-e8al2Kvufapnj5mYa6vGo5P1ub9w.",
"WwhXnsbFLxozhOKG1-NUO78iif9IiN5El3Qk9yk5fi70TMcaUMOwfWwjTyqAyNe6MCtiEA.",
"Wwi1wxVyz-X2piJk8Ts84d08Fm1UmHDTOS7ftlD6LCXdbUVjFrQlJfl97an8UHhZQM8juQ.",
"Wwj6xMcUvzQUaKwcRQ-qvwrIcZLDBRp9XP25HkEVBAJDVZBzujepzI_dttehdJiCAjCIMg.",
"WwjMZ_TL9xIl4jREPppT5df6tAsjLLgjRo_GKK5iLslGOh5lMtstOMP_iJEADXq6gjFEKA.",
"Wgj-oa5yHxfmjo0lLybtWGLiWYoKTZ07NXcUiaPiUHmtQQiAKlfzNTOA9lwqmCz2N0qGFg."
)
def Bard_generation(prompt):
limit_marker = False
token_index = 0
chatbot = Chatbot(tokens[token_index])
answer = chatbot.ask(prompt)
print('whole answer', answer)
while True: # This loop will continue until a string is returned
if isinstance(answer, dict): # check if answer is a dictionary (error response)
limit_marker = True
print("Token limit reached, switching to a new token...")
token_index += 1 # Move to the next token
if token_index >= len(tokens): # If we've used all tokens, start over
token_index = 0
print("exceeding total limit, Waiting 15 seconds...")
time.sleep(15) # freeze for 15s
chatbot = Chatbot(tokens[token_index]) # Create a new chatbot with the new token
answer = chatbot.ask(prompt) # resend the request
else:
return answer[0][0], limit_marker
def save_breaker(breaker):
with open("breaker.txt", "w") as f:
f.write(str(breaker))
# Function to load the breaker value from a file
def load_breaker():
if os.path.exists("breaker.txt"):
with open("breaker.txt", "r") as f:
breaker = int(f.read())
if breaker > 1037:
breaker = 0
else:
breaker = breaker
return breaker
return 0
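# Illustrative resume pattern built on save_breaker/load_breaker (not wired into the main
# loop below): skip samples that were already processed in a previous run.
#   breaker = load_breaker()
#   for index, sample in val_df.iterrows():
#       if index < breaker:
#           continue
#       ...  # generate SQL for this sample
#       save_breaker(index)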
if __name__ == '__main__':
###########################################################################################
# load the data
    spider_schema,spider_primary,spider_foreign = creating_schema(DATASET_SCHEMA)
val_df = load_data(DATASET)
SQLs_temp_pred = []
SQLs_temp_gold = []
for index,sample in val_df.iterrows():
print('index:',index)
db_id = sample['db_id'] # e.g.'car_1'
question = sample['question'] # e.g.'How many car models are produced by each maker? List the count and the maker full name.'
SQL_gold = sample['query'] # e.g.'SELECT COUNT(*) FROM car_1 WHERE car_1.id = 1'
print('SQL_gold:',SQL_gold)
schema = find_fields_MYSQL_like(db_id) + '\n' + "foreign key:" + find_foreign_keys_MYSQL_like(
db_id) + '\n' + "primary key:" + find_primary_keys_MYSQL_like(db_id) #
###############################################
'''message to Bard, to get few-shots'''
message_Bard = few_shot_generation_prompt_Bard + \
three_shot_Spider_prompt_without_explain + \
"Now, It is your turn:" + \
"\ndatabase:" + db_id + \
"\ndatabase chema:" + schema
print('message to Bard:', message_Bard)
response_Bard, _ = Bard_generation(message_Bard)
print('response_Bard:', response_Bard)
###############################################
'''message to GPT, to get SQL'''
message_GPT = SQL_generation_prompt + \
three_shots_SQL_generation_prompt_from_Bard + \
response_Bard + \
SQL_generation_prompt + \
"\ndatabase:" + db_id + \
"\ndatabase chema:" + schema + \
"Just give me the plain SQL without any placeholders." + \
"\nquestion:" + question+ \
"\nYour SQL:"
print('message to GPT3.5:', message_GPT)
SQL, limit_marker = GPT4_generation(message_GPT)
print('SQL:', SQL)
SQL = SQL.replace('\n', ' ')
print('\nGPT generated SQL:', SQL + '\n')
SQLs_temp_pred.append(SQL)
SQLs_temp_gold.append(SQL_gold+'\t'+db_id)
# with open ('./predicted_sql.txt', 'a') as f:
# f.write(SQL+'\n')
# with open ('./gold_sql.txt', 'a') as f:
# f.write(SQL_gold+'\t'+db_id+'\n')
# python v2.py | [
"\nHere is a sample of text2sql for you to understand the task.\nTable advisor, columns = [*,s_ID,i_ID]\nTable classroom, columns = [*,building,room_number,capacity]\nTable course, columns = [*,course_id,title,dept_name,credits]\nTable department, columns = [*,dept_name,building,budget]\nTable instructor, columns = [*,ID,name,dept_name,salary]\nTable prereq, columns = [*,course_id,prereq_id]\nTable section, columns = [*,course_id,sec_id,semester,year,building,room_number,time_slot_id]\nTable student, columns = [*,ID,name,dept_name,tot_cred]\nTable takes, columns = [*,ID,course_id,sec_id,semester,year,grade]\nTable teaches, columns = [*,ID,course_id,sec_id,semester,year]\nTable time_slot, columns = [*,time_slot_id,day,start_hr,start_min,end_hr,end_min]\n\nforeign key:[course.dept_name = department.dept_name,instructor.dept_name = department.dept_name,section.building = classroom.building,section.room_number = classroom.room_number,section.course_id = course.course_id,teaches.ID = instructor.ID,teaches.course_id = section.course_id,teaches.sec_id = section.sec_id,teaches.semester = section.semester,teaches.year = section.year,student.dept_name = department.dept_name,takes.ID = student.ID,takes.course_id = section.course_id,takes.sec_id = section.sec_id,takes.semester = section.semester,takes.year = section.year,advisor.s_ID = student.ID,advisor.i_ID = instructor.ID,prereq.prereq_id = course.course_id,prereq.course_id = course.course_id]\nprimary key:[classroom.building,department.dept_name,course.course_id,instructor.ID,section.course_id,teaches.ID,student.ID,takes.ID,advisor.s_ID,time_slot.time_slot_id,prereq.course_id]\n\nexample 1:\nQuestion: Find out the average salary of professors?\nLet's think step by step: 'salary of professors' -> 'salary of instructor' -> Go to Table instructor, find the column salary, and calculate the average value.\nSELECT avg ( salary ) FROM instructor\n\nexample 2:\nQuestion: Find the average salary of the professors of each department?\nLet's think step by step: 'salary of professors of each department' -> 'salary of instructor of each department' -> Go to Table instructor, find the column salary, and calculate the average value. 'each department'->group by the department.\nSELECT avg ( salary ) , dept_name FROM instructor GROUP BY dept_name\n\nexample 3:\nQuestion: Which department has the highest average salary of professors?\nLet's think step by step: 'highest average salary of professors' -> 'highest average salary of instructor' -> Go to Table instructor, find the column salary, and calculate the average value. 'highest' -> order by the average value. 'department' -> group by the department.\nSELECT dept_name FROM instructor GROUP BY dept_name ORDER BY avg ( salary ) DESC LIMIT 1\n",
"PLACEHOLDERPLACEHOLDERPLACEHOLDER",
"\nPlease help me generate the corresponding SQL query with no further explaination.\n",
"The SQL query is valid in grammar.",
"\nYou are an expert in SQL. I will give you a natural language question and a database schema, \nplease help me generate the corresponding SQL query with no further explaination.\n",
"\nHere is some examples of EASY, MEDIUM and HARD SQL queries.\nSELECT count(*) FROM singer \nSELECT avg(weight) , pettype FROM pets GROUP BY pettype\nSELECT T1.fname , T1.age FROM student AS T1 JOIN has_pet AS T2 ON T1.stuid = T2.stuid JOIN pets AS T3 ON T3.petid = T2.petid WHERE T3.pettype = 'dog' AND T1.stuid NOT IN (SELECT T1.stuid FROM student AS T1 JOIN has_pet AS T2 ON T1.stuid = T2.stuid JOIN pets AS T3 ON T3.petid = T2.petid WHERE T3.pettype = 'cat')\n",
"\nI will give you some expamples of translate questions to SQLs, I will also explain how the SQLs are generated step by step. please follow the instructions and generate your own answer(SQL).\n",
"\nYou are an expert in SQL. I will give you a database schema in Spider dataset, and you need to generate three\nSQL queries with natural language questions based on the schema and explian the chain of thoughts logic. Also, I will give you some exapmles\nof how this task is done.\n"
] |
2024-01-10 | YanJiaHuan/Text2Sql | multi_turn~GPT4_Spider.py | import pandas as pd
import time
import openai
import os
import sys
#----------------------------------------------------prompts-----------------------------------------------
schema_linking_prompt = '''Table advisor, columns = [*,s_ID,i_ID]
Table classroom, columns = [*,building,room_number,capacity]
Table course, columns = [*,course_id,title,dept_name,credits]
Table department, columns = [*,dept_name,building,budget]
Table instructor, columns = [*,ID,name,dept_name,salary]
Table prereq, columns = [*,course_id,prereq_id]
Table section, columns = [*,course_id,sec_id,semester,year,building,room_number,time_slot_id]
Table student, columns = [*,ID,name,dept_name,tot_cred]
Table takes, columns = [*,ID,course_id,sec_id,semester,year,grade]
Table teaches, columns = [*,ID,course_id,sec_id,semester,year]
Table time_slot, columns = [*,time_slot_id,day,start_hr,start_min,end_hr,end_min]
Foreign_keys = [course.dept_name = department.dept_name,instructor.dept_name = department.dept_name,section.building = classroom.building,section.room_number = classroom.room_number,section.course_id = course.course_id,teaches.ID = instructor.ID,teaches.course_id = section.course_id,teaches.sec_id = section.sec_id,teaches.semester = section.semester,teaches.year = section.year,student.dept_name = department.dept_name,takes.ID = student.ID,takes.course_id = section.course_id,takes.sec_id = section.sec_id,takes.semester = section.semester,takes.year = section.year,advisor.s_ID = student.ID,advisor.i_ID = instructor.ID,prereq.prereq_id = course.course_id,prereq.course_id = course.course_id]
Q: "Find the buildings which have rooms with capacity more than 50."
A: Let’s think step by step. In the question "Find the buildings which have rooms with capacity more than 50.", we are asked:
"the buildings which have rooms" so we need column = [classroom.capacity]
"rooms with capacity" so we need column = [classroom.building]
Based on the columns and tables, we need these Foreign_keys = [].
Based on the tables, columns, and Foreign_keys, The set of possible cell values are = [50]. So the Schema_links are:
Schema_links: [classroom.building,classroom.capacity,50]
Table department, columns = [*,Department_ID,Name,Creation,Ranking,Budget_in_Billions,Num_Employees]
Table head, columns = [*,head_ID,name,born_state,age]
Table management, columns = [*,department_ID,head_ID,temporary_acting]
Foreign_keys = [management.head_ID = head.head_ID,management.department_ID = department.Department_ID]
Q: "How many heads of the departments are older than 56 ?"
A: Let’s think step by step. In the question "How many heads of the departments are older than 56 ?", we are asked:
"How many heads of the departments" so we need column = [head.*]
"older" so we need column = [head.age]
Based on the columns and tables, we need these Foreign_keys = [].
Based on the tables, columns, and Foreign_keys, The set of possible cell values are = [56]. So the Schema_links are:
Schema_links: [head.*,head.age,56]
Table department, columns = [*,Department_ID,Name,Creation,Ranking,Budget_in_Billions,Num_Employees]
Table head, columns = [*,head_ID,name,born_state,age]
Table management, columns = [*,department_ID,head_ID,temporary_acting]
Foreign_keys = [management.head_ID = head.head_ID,management.department_ID = department.Department_ID]
Q: "what are the distinct creation years of the departments managed by a secretary born in state 'Alabama'?"
A: Let’s think step by step. In the question "what are the distinct creation years of the departments managed by a secretary born in state 'Alabama'?", we are asked:
"distinct creation years of the departments" so we need column = [department.Creation]
"departments managed by" so we need column = [management.department_ID]
"born in" so we need column = [head.born_state]
Based on the columns and tables, we need these Foreign_keys = [department.Department_ID = management.department_ID,management.head_ID = head.head_ID].
Based on the tables, columns, and Foreign_keys, The set of possible cell values are = ['Alabama']. So the Schema_links are:
Schema_links: [department.Creation,department.Department_ID = management.department_ID,head.head_ID = management.head_ID,head.born_state,'Alabama']
Table Addresses, columns = [*,address_id,line_1,line_2,city,zip_postcode,state_province_county,country]
Table Candidate_Assessments, columns = [*,candidate_id,qualification,assessment_date,asessment_outcome_code]
Table Candidates, columns = [*,candidate_id,candidate_details]
Table Courses, columns = [*,course_id,course_name,course_description,other_details]
Table People, columns = [*,person_id,first_name,middle_name,last_name,cell_mobile_number,email_address,login_name,password]
Table People_Addresses, columns = [*,person_address_id,person_id,address_id,date_from,date_to]
Table Student_Course_Attendance, columns = [*,student_id,course_id,date_of_attendance]
Table Student_Course_Registrations, columns = [*,student_id,course_id,registration_date]
Table Students, columns = [*,student_id,student_details]
Foreign_keys = [Students.student_id = People.person_id,People_Addresses.address_id = Addresses.address_id,People_Addresses.person_id = People.person_id,Student_Course_Registrations.course_id = Courses.course_id,Student_Course_Registrations.student_id = Students.student_id,Student_Course_Attendance.student_id = Student_Course_Registrations.student_id,Student_Course_Attendance.course_id = Student_Course_Registrations.course_id,Candidates.candidate_id = People.person_id,Candidate_Assessments.candidate_id = Candidates.candidate_id]
Q: "List the id of students who never attends courses?"
A: Let’s think step by step. In the question "List the id of students who never attends courses?", we are asked:
"id of students" so we need column = [Students.student_id]
"never attends courses" so we need column = [Student_Course_Attendance.student_id]
Based on the columns and tables, we need these Foreign_keys = [Students.student_id = Student_Course_Attendance.student_id].
Based on the tables, columns, and Foreign_keys, The set of possible cell values are = []. So the Schema_links are:
Schema_links: [Students.student_id = Student_Course_Attendance.student_id]
Table Country, columns = [*,id,name]
Table League, columns = [*,id,country_id,name]
Table Player, columns = [*,id,player_api_id,player_name,player_fifa_api_id,birthday,height,weight]
Table Player_Attributes, columns = [*,id,player_fifa_api_id,player_api_id,date,overall_rating,potential,preferred_foot,attacking_work_rate,defensive_work_rate,crossing,finishing,heading_accuracy,short_passing,volleys,dribbling,curve,free_kick_accuracy,long_passing,ball_control,acceleration,sprint_speed,agility,reactions,balance,shot_power,jumping,stamina,strength,long_shots,aggression,interceptions,positioning,vision,penalties,marking,standing_tackle,sliding_tackle,gk_diving,gk_handling,gk_kicking,gk_positioning,gk_reflexes]
Table Team, columns = [*,id,team_api_id,team_fifa_api_id,team_long_name,team_short_name]
Table Team_Attributes, columns = [*,id,team_fifa_api_id,team_api_id,date,buildUpPlaySpeed,buildUpPlaySpeedClass,buildUpPlayDribbling,buildUpPlayDribblingClass,buildUpPlayPassing,buildUpPlayPassingClass,buildUpPlayPositioningClass,chanceCreationPassing,chanceCreationPassingClass,chanceCreationCrossing,chanceCreationCrossingClass,chanceCreationShooting,chanceCreationShootingClass,chanceCreationPositioningClass,defencePressure,defencePressureClass,defenceAggression,defenceAggressionClass,defenceTeamWidth,defenceTeamWidthClass,defenceDefenderLineClass]
Table sqlite_sequence, columns = [*,name,seq]
Foreign_keys = [Player_Attributes.player_api_id = Player.player_api_id,Player_Attributes.player_fifa_api_id = Player.player_fifa_api_id,League.country_id = Country.id,Team_Attributes.team_api_id = Team.team_api_id,Team_Attributes.team_fifa_api_id = Team.team_fifa_api_id]
Q: "List the names of all left-footed players who have overall rating between 85 and 90."
A: Let’s think step by step. In the question "List the names of all left-footed players who have overall rating between 85 and 90.", we are asked:
"names of all left-footed players" so we need column = [Player.player_name,Player_Attributes.preferred_foot]
"players who have overall rating" so we need column = [Player_Attributes.overall_rating]
Based on the columns and tables, we need these Foreign_keys = [Player_Attributes.player_api_id = Player.player_api_id].
Based on the tables, columns, and Foreign_keys, The set of possible cell values are = [left,85,90]. So the Schema_links are:
Schema_links: [Player.player_name,Player_Attributes.preferred_foot,Player_Attributes.overall_rating,Player_Attributes.player_api_id = Player.player_api_id,left,85,90]
Table advisor, columns = [*,s_ID,i_ID]
Table classroom, columns = [*,building,room_number,capacity]
Table course, columns = [*,course_id,title,dept_name,credits]
Table department, columns = [*,dept_name,building,budget]
Table instructor, columns = [*,ID,name,dept_name,salary]
Table prereq, columns = [*,course_id,prereq_id]
Table section, columns = [*,course_id,sec_id,semester,year,building,room_number,time_slot_id]
Table student, columns = [*,ID,name,dept_name,tot_cred]
Table takes, columns = [*,ID,course_id,sec_id,semester,year,grade]
Table teaches, columns = [*,ID,course_id,sec_id,semester,year]
Table time_slot, columns = [*,time_slot_id,day,start_hr,start_min,end_hr,end_min]
Foreign_keys = [course.dept_name = department.dept_name,instructor.dept_name = department.dept_name,section.building = classroom.building,section.room_number = classroom.room_number,section.course_id = course.course_id,teaches.ID = instructor.ID,teaches.course_id = section.course_id,teaches.sec_id = section.sec_id,teaches.semester = section.semester,teaches.year = section.year,student.dept_name = department.dept_name,takes.ID = student.ID,takes.course_id = section.course_id,takes.sec_id = section.sec_id,takes.semester = section.semester,takes.year = section.year,advisor.s_ID = student.ID,advisor.i_ID = instructor.ID,prereq.prereq_id = course.course_id,prereq.course_id = course.course_id]
Q: "Give the title of the course offered in Chandler during the Fall of 2010."
A: Let’s think step by step. In the question "Give the title of the course offered in Chandler during the Fall of 2010.", we are asked:
"title of the course" so we need column = [course.title]
"course offered in Chandler" so we need column = [SECTION.building]
"during the Fall" so we need column = [SECTION.semester]
"of 2010" so we need column = [SECTION.year]
Based on the columns and tables, we need these Foreign_keys = [course.course_id = SECTION.course_id].
Based on the tables, columns, and Foreign_keys, The set of possible cell values are = [Chandler,Fall,2010]. So the Schema_links are:
Schema_links: [course.title,course.course_id = SECTION.course_id,SECTION.building,SECTION.year,SECTION.semester,Chandler,Fall,2010]
Table city, columns = [*,City_ID,Official_Name,Status,Area_km_2,Population,Census_Ranking]
Table competition_record, columns = [*,Competition_ID,Farm_ID,Rank]
Table farm, columns = [*,Farm_ID,Year,Total_Horses,Working_Horses,Total_Cattle,Oxen,Bulls,Cows,Pigs,Sheep_and_Goats]
Table farm_competition, columns = [*,Competition_ID,Year,Theme,Host_city_ID,Hosts]
Foreign_keys = [farm_competition.Host_city_ID = city.City_ID,competition_record.Farm_ID = farm.Farm_ID,competition_record.Competition_ID = farm_competition.Competition_ID]
Q: "Show the status of the city that has hosted the greatest number of competitions."
A: Let’s think step by step. In the question "Show the status of the city that has hosted the greatest number of competitions.", we are asked:
"the status of the city" so we need column = [city.Status]
"greatest number of competitions" so we need column = [farm_competition.*]
Based on the columns and tables, we need these Foreign_keys = [farm_competition.Host_city_ID = city.City_ID].
Based on the tables, columns, and Foreign_keys, The set of possible cell values are = []. So the Schema_links are:
Schema_links: [city.Status,farm_competition.Host_city_ID = city.City_ID,farm_competition.*]
Table advisor, columns = [*,s_ID,i_ID]
Table classroom, columns = [*,building,room_number,capacity]
Table course, columns = [*,course_id,title,dept_name,credits]
Table department, columns = [*,dept_name,building,budget]
Table instructor, columns = [*,ID,name,dept_name,salary]
Table prereq, columns = [*,course_id,prereq_id]
Table section, columns = [*,course_id,sec_id,semester,year,building,room_number,time_slot_id]
Table student, columns = [*,ID,name,dept_name,tot_cred]
Table takes, columns = [*,ID,course_id,sec_id,semester,year,grade]
Table teaches, columns = [*,ID,course_id,sec_id,semester,year]
Table time_slot, columns = [*,time_slot_id,day,start_hr,start_min,end_hr,end_min]
Foreign_keys = [course.dept_name = department.dept_name,instructor.dept_name = department.dept_name,section.building = classroom.building,section.room_number = classroom.room_number,section.course_id = course.course_id,teaches.ID = instructor.ID,teaches.course_id = section.course_id,teaches.sec_id = section.sec_id,teaches.semester = section.semester,teaches.year = section.year,student.dept_name = department.dept_name,takes.ID = student.ID,takes.course_id = section.course_id,takes.sec_id = section.sec_id,takes.semester = section.semester,takes.year = section.year,advisor.s_ID = student.ID,advisor.i_ID = instructor.ID,prereq.prereq_id = course.course_id,prereq.course_id = course.course_id]
Q: "Find the id of instructors who taught a class in Fall 2009 but not in Spring 2010."
A: Let’s think step by step. In the question "Find the id of instructors who taught a class in Fall 2009 but not in Spring 2010.", we are asked:
"id of instructors who taught " so we need column = [teaches.id]
"taught a class in" so we need column = [teaches.semester,teaches.year]
Based on the columns and tables, we need these Foreign_keys = [].
Based on the tables, columns, and Foreign_keys, The set of possible cell values are = [Fall,2009,Spring,2010]. So the Schema_links are:
schema_links: [teaches.id,teaches.semester,teaches.year,Fall,2009,Spring,2010]
Table Accounts, columns = [*,account_id,customer_id,date_account_opened,account_name,other_account_details]
Table Customers, columns = [*,customer_id,customer_first_name,customer_middle_initial,customer_last_name,gender,email_address,login_name,login_password,phone_number,town_city,state_county_province,country]
Table Financial_Transactions, columns = [*,transaction_id,account_id,invoice_number,transaction_type,transaction_date,transaction_amount,transaction_comment,other_transaction_details]
Table Invoice_Line_Items, columns = [*,order_item_id,invoice_number,product_id,product_title,product_quantity,product_price,derived_product_cost,derived_vat_payable,derived_total_cost]
Table Invoices, columns = [*,invoice_number,order_id,invoice_date]
Table Order_Items, columns = [*,order_item_id,order_id,product_id,product_quantity,other_order_item_details]
Table Orders, columns = [*,order_id,customer_id,date_order_placed,order_details]
Table Product_Categories, columns = [*,production_type_code,product_type_description,vat_rating]
Table Products, columns = [*,product_id,parent_product_id,production_type_code,unit_price,product_name,product_color,product_size]
Foreign_keys = [Orders.customer_id = Customers.customer_id,Invoices.order_id = Orders.order_id,Accounts.customer_id = Customers.customer_id,Products.production_type_code = Product_Categories.production_type_code,Financial_Transactions.account_id = Accounts.account_id,Financial_Transactions.invoice_number = Invoices.invoice_number,Order_Items.order_id = Orders.order_id,Order_Items.product_id = Products.product_id,Invoice_Line_Items.product_id = Products.product_id,Invoice_Line_Items.invoice_number = Invoices.invoice_number,Invoice_Line_Items.order_item_id = Order_Items.order_item_id]
Q: "Show the id, the date of account opened, the account name, and other account detail for all accounts."
A: Let’s think step by step. In the question "Show the id, the date of account opened, the account name, and other account detail for all accounts.", we are asked:
"the id, the date of account opened, the account name, and other account detail for all accounts." so we need column = [Accounts.account_id,Accounts.account_name,Accounts.other_account_details,Accounts.date_account_opened]
Based on the columns and tables, we need these Foreign_keys = [].
Based on the tables, columns, and Foreign_keys, The set of possible cell values are = []. So the Schema_links are:
Schema_links: [Accounts.account_id,Accounts.account_name,Accounts.other_account_details,Accounts.date_account_opened]
Table city, columns = [*,City_ID,Official_Name,Status,Area_km_2,Population,Census_Ranking]
Table competition_record, columns = [*,Competition_ID,Farm_ID,Rank]
Table farm, columns = [*,Farm_ID,Year,Total_Horses,Working_Horses,Total_Cattle,Oxen,Bulls,Cows,Pigs,Sheep_and_Goats]
Table farm_competition, columns = [*,Competition_ID,Year,Theme,Host_city_ID,Hosts]
Foreign_keys = [farm_competition.Host_city_ID = city.City_ID,competition_record.Farm_ID = farm.Farm_ID,competition_record.Competition_ID = farm_competition.Competition_ID]
Q: "Show the status shared by cities with population bigger than 1500 and smaller than 500."
A: Let’s think step by step. In the question "Show the status shared by cities with population bigger than 1500 and smaller than 500.", we are asked:
"the status shared by cities" so we need column = [city.Status]
"cities with population" so we need column = [city.Population]
Based on the columns and tables, we need these Foreign_keys = [].
Based on the tables, columns, and Foreign_keys, The set of possible cell values are = [1500,500]. So the Schema_links are:
Schema_links: [city.Status,city.Population,1500,500]
'''
classification_prompt = '''Q: "Find the buildings which have rooms with capacity more than 50."
schema_links: [classroom.building,classroom.capacity,50]
A: Let’s think step by step. The SQL query for the question "Find the buildings which have rooms with capacity more than 50." needs these tables = [classroom], so we don't need JOIN.
Plus, it doesn't require nested queries with (INTERSECT, UNION, EXCEPT, IN, NOT IN), and we need the answer to the questions = [""].
So, we don't need JOIN and don't need nested queries, then the SQL query can be classified as "EASY".
Label: "EASY"
Q: "What are the names of all instructors who advise students in the math depart sorted by total credits of the student."
schema_links: [advisor.i_id = instructor.id,advisor.s_id = student.id,instructor.name,student.dept_name,student.tot_cred,math]
A: Let’s think step by step. The SQL query for the question "What are the names of all instructors who advise students in the math depart sorted by total credits of the student." needs these tables = [advisor,instructor,student], so we need JOIN.
Plus, it doesn't need nested queries with (INTERSECT, UNION, EXCEPT, IN, NOT IN), and we need the answer to the questions = [""].
So, we need JOIN and don't need nested queries, then the SQL query can be classified as "NON-NESTED".
Label: "NON-NESTED"
Q: "Find the room number of the rooms which can sit 50 to 100 students and their buildings."
schema_links: [classroom.building,classroom.room_number,classroom.capacity,50,100]
A: Let’s think step by step. The SQL query for the question "Find the room number of the rooms which can sit 50 to 100 students and their buildings." needs these tables = [classroom], so we don't need JOIN.
Plus, it doesn't require nested queries with (INTERSECT, UNION, EXCEPT, IN, NOT IN), and we need the answer to the questions = [""].
So, we don't need JOIN and don't need nested queries, then the SQL query can be classified as "EASY".
Label: "EASY"
Q: "How many courses that do not have prerequisite?"
schema_links: [course.*,course.course_id = prereq.course_id]
A: Let’s think step by step. The SQL query for the question "How many courses that do not have prerequisite?" needs these tables = [course,prereq], so we need JOIN.
Plus, it requires nested queries with (INTERSECT, UNION, EXCEPT, IN, NOT IN), and we need the answer to the questions = ["Which courses have prerequisite?"].
So, we need JOIN and need nested queries, then the SQL query can be classified as "NESTED".
Label: "NESTED"
Q: "Find the title of course that is provided by both Statistics and Psychology departments."
schema_links: [course.title,course.dept_name,Statistics,Psychology]
A: Let’s think step by step. The SQL query for the question "Find the title of course that is provided by both Statistics and Psychology departments." needs these tables = [course], so we don't need JOIN.
Plus, it requires nested queries with (INTERSECT, UNION, EXCEPT, IN, NOT IN), and we need the answer to the questions = ["Find the titles of courses that is provided by Psychology departments"].
So, we don't need JOIN and need nested queries, then the SQL query can be classified as "NESTED".
Label: "NESTED"
Q: "Find the id of instructors who taught a class in Fall 2009 but not in Spring 2010."
schema_links: [teaches.id,teaches.semester,teaches.year,Fall,2009,Spring,2010]
A: Let’s think step by step. The SQL query for the question "Find the id of instructors who taught a class in Fall 2009 but not in Spring 2010." needs these tables = [teaches], so we don't need JOIN.
Plus, it requires nested queries with (INTERSECT, UNION, EXCEPT, IN, NOT IN), and we need the answer to the questions = ["Find the id of instructors who taught a class in Spring 2010"].
So, we don't need JOIN and need nested queries, then the SQL query can be classified as "NESTED".
Label: "NESTED"
Q: "Find the name of the department that offers the highest total credits?"
schema_links: [course.dept_name,course.credits]
A: Let’s think step by step. The SQL query for the question "Find the name of the department that offers the highest total credits?" needs these tables = [course], so we don't need JOIN.
Plus, it doesn't require nested queries with (INTERSECT, UNION, EXCEPT, IN, NOT IN), and we need the answer to the questions = [""].
So, we don't need JOIN and don't need nested queries, then the SQL query can be classified as "EASY".
Label: "EASY"
Q: "What is the name of the instructor who advises the student with the greatest number of total credits?"
schema_links: [advisor.i_id = instructor.id,advisor.s_id = student.id,instructor.name,student.tot_cred ]
A: Let’s think step by step. The SQL query for the question "What is the name of the instructor who advises the student with the greatest number of total credits?" needs these tables = [advisor,instructor,student], so we need JOIN.
Plus, it doesn't need nested queries with (INTERSECT, UNION, EXCEPT, IN, NOT IN), and we need the answer to the questions = [""].
So, we need JOIN and don't need nested queries, then the SQL query can be classified as "NON-NESTED".
Label: "NON-NESTED"
Q: "Find the total number of students and total number of instructors for each department."
schema_links = [department.dept_name = instructor.dept_name,student.id,student.dept_name = department.dept_name,instructor.id]
A: Let’s think step by step. The SQL query for the question "Find the total number of students and total number of instructors for each department." needs these tables = [department,instructor,student], so we need JOIN.
Plus, it doesn't need nested queries with (INTERSECT, UNION, EXCEPT, IN, NOT IN), and we need the answer to the questions = [""].
So, we need JOIN and don't need nested queries, then the SQL query can be classified as "NON-NESTED".
Label: "NON-NESTED"
Q: "Give the name and building of the departments with greater than average budget."
schema_links: [department.budget,department.dept_name,department.building]
A: Let’s think step by step. The SQL query for the question "Give the name and building of the departments with greater than average budget." needs these tables = [department], so we don't need JOIN.
Plus, it requires nested queries with (INTERSECT, UNION, EXCEPT, IN, NOT IN), and we need the answer to the questions = ["What is the average budget of the departments"].
So, we don't need JOIN and need nested queries, then the SQL query can be classified as "NESTED".
Label: "NESTED"
'''
easy_prompt = '''Q: "Find the buildings which have rooms with capacity more than 50."
Schema_links: [classroom.building,classroom.capacity,50]
SQL: SELECT DISTINCT building FROM classroom WHERE capacity > 50
Q: "Find the room number of the rooms which can sit 50 to 100 students and their buildings."
Schema_links: [classroom.building,classroom.room_number,classroom.capacity,50,100]
SQL: SELECT building , room_number FROM classroom WHERE capacity BETWEEN 50 AND 100
Q: "Give the name of the student in the History department with the most credits."
Schema_links: [student.name,student.dept_name,student.tot_cred,History]
SQL: SELECT name FROM student WHERE dept_name = 'History' ORDER BY tot_cred DESC LIMIT 1
Q: "Find the total budgets of the Marketing or Finance department."
Schema_links: [department.budget,department.dept_name,Marketing,Finance]
SQL: SELECT sum(budget) FROM department WHERE dept_name = 'Marketing' OR dept_name = 'Finance'
Q: "Find the department name of the instructor whose name contains 'Soisalon'."
Schema_links: [instructor.dept_name,instructor.name,Soisalon]
SQL: SELECT dept_name FROM instructor WHERE name LIKE '%Soisalon%'
Q: "What is the name of the department with the most credits?"
Schema_links: [course.dept_name,course.credits]
SQL: SELECT dept_name FROM course GROUP BY dept_name ORDER BY sum(credits) DESC LIMIT 1
Q: "How many instructors teach a course in the Spring of 2010?"
Schema_links: [teaches.ID,teaches.semester,teaches.YEAR,Spring,2010]
SQL: SELECT COUNT (DISTINCT ID) FROM teaches WHERE semester = 'Spring' AND YEAR = 2010
Q: "Find the name of the students and their department names sorted by their total credits in ascending order."
Schema_links: [student.name,student.dept_name,student.tot_cred]
SQL: SELECT name , dept_name FROM student ORDER BY tot_cred
Q: "Find the year which offers the largest number of courses."
Schema_links: [SECTION.YEAR,SECTION.*]
SQL: SELECT YEAR FROM SECTION GROUP BY YEAR ORDER BY count(*) DESC LIMIT 1
Q: "What are the names and average salaries for departments with average salary higher than 42000?"
Schema_links: [instructor.dept_name,instructor.salary,42000]
SQL: SELECT dept_name , AVG (salary) FROM instructor GROUP BY dept_name HAVING AVG (salary) > 42000
Q: "How many rooms in each building have a capacity of over 50?"
Schema_links: [classroom.*,classroom.building,classroom.capacity,50]
SQL: SELECT count(*) , building FROM classroom WHERE capacity > 50 GROUP BY building
Q: "Find the names of the top 3 departments that provide the largest amount of courses?"
Schema_links: [course.dept_name,course.*]
SQL: SELECT dept_name FROM course GROUP BY dept_name ORDER BY count(*) DESC LIMIT 3
Q: "Find the maximum and average capacity among rooms in each building."
Schema_links: [classroom.building,classroom.capacity]
SQL: SELECT max(capacity) , avg(capacity) , building FROM classroom GROUP BY building
Q: "Find the title of the course that is offered by more than one department."
Schema_links: [course.title]
SQL: SELECT title FROM course GROUP BY title HAVING count(*) > 1
'''
medium_prompt = '''Q: "Find the total budgets of the Marketing or Finance department."
Schema_links: [department.budget,department.dept_name,Marketing,Finance]
A: Let’s think step by step. For creating the SQL for the given question, we need to join these tables = []. First, create an intermediate representation, then use it to construct the SQL query.
Intermediate_representation: select sum(department.budget) from department where department.dept_name = \"Marketing\" or department.dept_name = \"Finance\"
SQL: SELECT sum(budget) FROM department WHERE dept_name = 'Marketing' OR dept_name = 'Finance'
Q: "Find the name and building of the department with the highest budget."
Schema_links: [department.budget,department.dept_name,department.building]
A: Let’s think step by step. For creating the SQL for the given question, we need to join these tables = []. First, create an intermediate representation, then use it to construct the SQL query.
Intermediate_representation: select department.dept_name , department.building from department order by department.budget desc limit 1
SQL: SELECT dept_name , building FROM department ORDER BY budget DESC LIMIT 1
Q: "What is the name and building of the departments whose budget is more than the average budget?"
Schema_links: [department.budget,department.dept_name,department.building]
A: Let’s think step by step. For creating the SQL for the given question, we need to join these tables = []. First, create an intermediate representation, then use it to construct the SQL query.
Intermediate_representation: select department.dept_name , department.building from department where @.@ > avg ( department.budget )
SQL: SELECT dept_name , building FROM department WHERE budget > (SELECT avg(budget) FROM department)
Q: "Find the total number of students and total number of instructors for each department."
Schema_links: [department.dept_name = student.dept_name,student.id,department.dept_name = instructor.dept_name,instructor.id]
A: Let’s think step by step. For creating the SQL for the given question, we need to join these tables = [department,student,instructor]. First, create an intermediate representation, then use it to construct the SQL query.
Intermediate_representation: "select count( distinct student.ID) , count( distinct instructor.ID) , department.dept_name from department group by instructor.dept_name
SQL: SELECT count(DISTINCT T2.id) , count(DISTINCT T3.id) , T3.dept_name FROM department AS T1 JOIN student AS T2 ON T1.dept_name = T2.dept_name JOIN instructor AS T3 ON T1.dept_name = T3.dept_name GROUP BY T3.dept_name
Q: "Find the title of courses that have two prerequisites?"
Schema_links: [course.title,course.course_id = prereq.course_id]
A: Let’s think step by step. For creating the SQL for the given question, we need to join these tables = [course,prereq]. First, create an intermediate representation, then use it to construct the SQL query.
Intermediate_representation: select course.title from course where count ( prereq.* ) = 2 group by prereq.course_id
SQL: SELECT T1.title FROM course AS T1 JOIN prereq AS T2 ON T1.course_id = T2.course_id GROUP BY T2.course_id HAVING count(*) = 2
Q: "Find the name of students who took any class in the years of 2009 and 2010."
Schema_links: [student.name,student.id = takes.id,takes.YEAR,2009,2010]
A: Let’s think step by step. For creating the SQL for the given question, we need to join these tables = [student,takes]. First, create an intermediate representation, then use it to construct the SQL query.
Intermediate_representation: select distinct student.name from student where takes.year = 2009 or takes.year = 2010
SQL: SELECT DISTINCT T1.name FROM student AS T1 JOIN takes AS T2 ON T1.id = T2.id WHERE T2.YEAR = 2009 OR T2.YEAR = 2010
Q: "list in alphabetic order all course names and their instructors' names in year 2008."
Schema_links: [course.title,course.course_id = teaches.course_id,teaches.id = instructor.id,instructor.name,teaches.year,2008]
A: Let’s think step by step. For creating the SQL for the given question, we need to join these tables = [course,teaches,instructor]. First, create an intermediate representation, then use it to construct the SQL query.
Intermediate_representation: select course.title , instructor.name from course where teaches.year = 2008 order by course.title asc
SQL: SELECT T1.title , T3.name FROM course AS T1 JOIN teaches AS T2 ON T1.course_id = T2.course_id JOIN instructor AS T3 ON T2.id = T3.id WHERE T2.YEAR = 2008 ORDER BY T1.title
'''
hard_prompt = '''Q: "Find the title of courses that have two prerequisites?"
Schema_links: [course.title,course.course_id = prereq.course_id]
A: Let's think step by step. "Find the title of courses that have two prerequisites?" can be solved by knowing the answer to the following sub-question "What are the titles for courses with two prerequisites?".
The SQL query for the sub-question "What are the titles for courses with two prerequisites?" is SELECT T1.title FROM course AS T1 JOIN prereq AS T2 ON T1.course_id = T2.course_id GROUP BY T2.course_id HAVING count(*) = 2
So, the answer to the question "Find the title of courses that have two prerequisites?" is =
Intermediate_representation: select course.title from course where count ( prereq.* ) = 2 group by prereq.course_id
SQL: SELECT T1.title FROM course AS T1 JOIN prereq AS T2 ON T1.course_id = T2.course_id GROUP BY T2.course_id HAVING count(*) = 2
Q: "Find the name and building of the department with the highest budget."
Schema_links: [department.dept_name,department.building,department.budget]
A: Let's think step by step. "Find the name and building of the department with the highest budget." can be solved by knowing the answer to the following sub-question "What is the department name and corresponding building for the department with the greatest budget?".
The SQL query for the sub-question "What is the department name and corresponding building for the department with the greatest budget?" is SELECT dept_name , building FROM department ORDER BY budget DESC LIMIT 1
So, the answer to the question "Find the name and building of the department with the highest budget." is =
Intermediate_representation: select department.dept_name , department.building from department order by department.budget desc limit 1
SQL: SELECT dept_name , building FROM department ORDER BY budget DESC LIMIT 1
Q: "Find the title, credit, and department name of courses that have more than one prerequisites?"
Schema_links: [course.title,course.credits,course.dept_name,course.course_id = prereq.course_id]
A: Let's think step by step. "Find the title, credit, and department name of courses that have more than one prerequisites?" can be solved by knowing the answer to the following sub-question "What is the title, credit value, and department name for courses with more than one prerequisite?".
The SQL query for the sub-question "What is the title, credit value, and department name for courses with more than one prerequisite?" is SELECT T1.title , T1.credits , T1.dept_name FROM course AS T1 JOIN prereq AS T2 ON T1.course_id = T2.course_id GROUP BY T2.course_id HAVING count(*) > 1
So, the answer to the question "Find the name and building of the department with the highest budget." is =
Intermediate_representation: select course.title , course.credits , course.dept_name from course where count ( prereq.* ) > 1 group by prereq.course_id
SQL: SELECT T1.title , T1.credits , T1.dept_name FROM course AS T1 JOIN prereq AS T2 ON T1.course_id = T2.course_id GROUP BY T2.course_id HAVING count(*) > 1
Q: "Give the name and building of the departments with greater than average budget."
Schema_links: [department.dept_name,department.building,department.budget]
A: Let's think step by step. "Give the name and building of the departments with greater than average budget." can be solved by knowing the answer to the following sub-question "What is the average budget of departments?".
The SQL query for the sub-question "What is the average budget of departments?" is SELECT avg(budget) FROM department
So, the answer to the question "Give the name and building of the departments with greater than average budget." is =
Intermediate_representation: select department.dept_name , department.building from department where @.@ > avg ( department.budget )
SQL: SELECT dept_name , building FROM department WHERE budget > (SELECT avg(budget) FROM department)
Q: "Find the id of instructors who taught a class in Fall 2009 but not in Spring 2010."
Schema_links: [teaches.id,teaches.semester,teaches.YEAR,Fall,2009,Spring,2010]
A: Let's think step by step. "Find the id of instructors who taught a class in Fall 2009 but not in Spring 2010." can be solved by knowing the answer to the following sub-question "Find the id of instructors who taught a class in Spring 2010".
The SQL query for the sub-question "Find the id of instructors who taught a class in Spring 2010" is SELECT id FROM teaches WHERE semester = 'Spring' AND YEAR = 2010
So, the answer to the question "Find the id of instructors who taught a class in Fall 2009 but not in Spring 2010." is =
Intermediate_representation: select teaches.ID from teaches where teaches.semester = \"Fall\" and teaches.year = 2009 and teaches.semester != \"Spring\" and teaches.year = 2010
SQL: SELECT id FROM teaches WHERE semester = 'Fall' AND YEAR = 2009 EXCEPT SELECT id FROM teaches WHERE semester = 'Spring' AND YEAR = 2010
Q: "Find the name of the courses that do not have any prerequisite?"
Schema_links: [course.title,course.course_id]
A: Let's think step by step. "Find the name of the courses that do not have any prerequisite?" can be solved by knowing the answer to the following sub-question "What are the courses that have any prerequisite?".
The SQL query for the sub-question "What are the courses that have any prerequisite?" is SELECT course_id FROM prereq
So, the answer to the question "Find the name of the courses that do not have any prerequisite?" is =
Intermediate_representation: select course.title from course where @.@ not in prereq.course_id
SQL: SELECT title FROM course WHERE course_id NOT IN (SELECT course_id FROM prereq)
Q: "Find the salaries of all distinct instructors that are less than the largest salary."
Schema_links: [instructor.salary]
A: Let's think step by step. "Find the salaries of all distinct instructors that are less than the largest salary." can be solved by knowing the answer to the following sub-question "What is the largest salary of instructors".
The SQL query for the sub-question "What is the largest salary of instructors" is SELECT max(salary) FROM instructor
So, the answer to the question "Find the salaries of all distinct instructors that are less than the largest salary." is =
Intermediate_representation: select distinct instructor.salary from instructor where @.@ < max ( instructor.salary )
SQL: SELECT DISTINCT salary FROM instructor WHERE salary < (SELECT max(salary) FROM instructor)
Q: "Find the names of students who have taken any course in the fall semester of year 2003."
Schema_links: [student.id,student.name,takes.id,takes.semester,fall,2003]
A: Let's think step by step. "Find the names of students who have taken any course in the fall semester of year 2003." can be solved by knowing the answer to the following sub-question "Find the students who have taken any course in the fall semester of year 2003.".
The SQL query for the sub-question "Find the students who have taken any course in the fall semester of year 2003." is SELECT id FROM takes WHERE semester = 'Fall' AND YEAR = 2003
So, the answer to the question "Find the names of students who have taken any course in the fall semester of year 2003." is =
Intermediate_representation: select student.name from student where takes.semester = \"Fall\" and takes.year = 2003
SQL: SELECT name FROM student WHERE id IN (SELECT id FROM takes WHERE semester = 'Fall' AND YEAR = 2003)
Q: "Find the minimum salary for the departments whose average salary is above the average payment of all instructors."
Schema_links: [instructor.salary,instructor.dept_name]
A: Let's think step by step. "Find the minimum salary for the departments whose average salary is above the average payment of all instructors." can be solved by knowing the answer to the following sub-question "What is the average payment of all instructors.".
The SQL query for the sub-question "What is the average payment of all instructors." is SELECT avg(salary) FROM instructor
So, the answer to the question "Find the minimum salary for the departments whose average salary is above the average payment of all instructors." is =
Intermediate_representation: select min(instructor.salary) , instructor.dept_name from instructor where avg ( instructor.salary ) > avg ( instructor.salary ) group by instructor.dept_name
SQL: SELECT min(salary) , dept_name FROM instructor GROUP BY dept_name HAVING avg(salary) > (SELECT avg(salary) FROM instructor)
Q: "What is the course title of the prerequisite of course Mobile Computing?"
Schema_links: [course.title,course.course_id = prereq.course_id,prereq.prereq_id,course.title,Mobile Computing]
A: Let's think step by step. "What is the course title of the prerequisite of course Mobile Computing?" can be solved by knowing the answer to the following sub-question "What are the ids of the prerequisite of course Mobile Computing?".
The SQL query for the sub-question "What are the ids of the prerequisite of course Mobile Computing?" is SSELECT T1.prereq_id FROM prereq AS T1 JOIN course AS T2 ON T1.course_id = T2.course_id WHERE T2.title = 'Mobile Computing'
So, the answer to the question "What is the course title of the prerequisite of course Mobile Computing?" is =
Intermediate_representation: select course.title from course where @.@ in prereq.* and course.title = \"Mobile Computing\"
SQL: SELECT title FROM course WHERE course_id IN (SELECT T1.prereq_id FROM prereq AS T1 JOIN course AS T2 ON T1.course_id = T2.course_id WHERE T2.title = 'Mobile Computing')
Q: "Give the title and credits for the course that is taught in the classroom with the greatest capacity."
Schema_links: [classroom.capacity,classroom.building = SECTION.building,classroom.room_number = SECTION.room_number,course.title,course.credits,course.course_id = SECTION.course_id]
A: Let's think step by step. "Give the title and credits for the course that is taught in the classroom with the greatest capacity." can be solved by knowing the answer to the following sub-question "What is the capacity of the largest room?".
The SQL query for the sub-question "What is the capacity of the largest room?" is (SELECT max(capacity) FROM classroom)
So, the answer to the question "Give the title and credits for the course that is taught in the classroom with the greatest capacity." is =
Intermediate_representation: select course.title , course.credits from classroom order by classroom.capacity desc limit 1
SQL: SELECT T3.title , T3.credits FROM classroom AS T1 JOIN SECTION AS T2 ON T1.building = T2.building AND T1.room_number = T2.room_number JOIN course AS T3 ON T2.course_id = T3.course_id WHERE T1.capacity = (SELECT max(capacity) FROM classroom)
'''
#----------------------------------------------------------------------------------------------------------
if sys.argv[1] == "--dataset" and sys.argv[3] == "--output":
DATASET_SCHEMA = sys.argv[2]+"tables.json"
DATASET = sys.argv[2]+"dev.json"
OUTPUT_FILE = sys.argv[4]
else:
raise Exception("Please use this format python CoT.py --dataset data/ --output predicted_sql.txt")
API_KEY = ""  # TODO: put your OpenAI API key here
os.environ["OPENAI_API_KEY"] = API_KEY
openai.api_key = os.getenv("OPENAI_API_KEY")
def load_data(DATASET):
return pd.read_json(DATASET)
def hard_prompt_maker(test_sample_text,database,schema_links,sub_questions):
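    # Prompt builder for questions classified as NESTED: lists the schema and
    # foreign keys of the few-shot database ("college_2") and of the target
    # database, appends the hard few-shot examples, then the question, its
    # schema links and the sub-question found during classification, ending
    # mid-sentence so the completion continues with the sub-question's SQL.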
instruction = "# Use the intermediate representation and the schema links to generate the SQL queries for each of the questions.\n"
fields = find_fields_MYSQL_like("college_2")
fields += "Foreign_keys = " + find_foreign_keys_MYSQL_like("college_2") + '\n'
fields += find_fields_MYSQL_like(database)
fields += "Foreign_keys = " + find_foreign_keys_MYSQL_like(database) + '\n'
stepping = f'''\nA: Let's think step by step. "{test_sample_text}" can be solved by knowing the answer to the following sub-question "{sub_questions}".'''
fields += "\n"
prompt = instruction +fields + hard_prompt + 'Q: "' + test_sample_text + '"' + '\nschema_links: ' + schema_links + stepping +'\nThe SQL query for the sub-question"'
return prompt
def medium_prompt_maker(test_sample_text,database,schema_links):
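    # Prompt builder for NON-NESTED questions: same schema prelude as above,
    # followed by the medium few-shot examples, the question and its schema
    # links, ending with "Let's think step by step." so the completion gives
    # the intermediate representation and then the SQL.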
instruction = "# Use the the schema links and Intermediate_representation to generate the SQL queries for each of the questions.\n"
fields = find_fields_MYSQL_like("college_2")
fields += "Foreign_keys = " + find_foreign_keys_MYSQL_like("college_2") + '\n'
fields += find_fields_MYSQL_like(database)
fields += "Foreign_keys = " + find_foreign_keys_MYSQL_like(database) + '\n'
fields += "\n"
prompt = instruction +fields + medium_prompt + 'Q: "' + test_sample_text + '\nSchema_links: ' + schema_links + '\nA: Let’s think step by step.'
return prompt
def easy_prompt_maker(test_sample_text,database,schema_links):
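    # Prompt builder for EASY questions: only the table/column listings (no
    # foreign keys), the easy few-shot examples, the question and its schema
    # links, ending with "SQL:" so the completion is the query itself.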
instruction = "# Use the the schema links to generate the SQL queries for each of the questions.\n"
fields = find_fields_MYSQL_like("college_2")
fields += find_fields_MYSQL_like(database)
fields += "\n"
prompt = instruction +fields + easy_prompt + 'Q: "' + test_sample_text + '\nSchema_links: ' + schema_links + '\nSQL:'
return prompt
def classification_prompt_maker(test_sample_text,database,schema_links):
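    # Builds the difficulty-classification prompt (EASY / NON-NESTED / NESTED)
    # from the schemas, the classification few-shot examples, the question and
    # its schema links; the completion is expected to end with a 'Label: "..."'
    # line and, for NESTED cases, the sub-questions reused by the hard prompt.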
instruction = "# For the given question, classify it as EASY, NON-NESTED, or NESTED based on nested queries and JOIN.\n"
instruction += "\nif need nested queries: predict NESTED\n"
instruction += "elif need JOIN and don't need nested queries: predict NON-NESTED\n"
instruction += "elif don't need JOIN and don't need nested queries: predict EASY\n\n"
fields = find_fields_MYSQL_like("college_2")
fields += "Foreign_keys = " + find_foreign_keys_MYSQL_like("college_2") + '\n'
fields += find_fields_MYSQL_like(database)
fields += "Foreign_keys = " + find_foreign_keys_MYSQL_like(database) + '\n'
fields += "\n"
prompt = instruction + fields + classification_prompt + 'Q: "' + test_sample_text + '\nschema_links: ' + schema_links + '\nA: Let’s think step by step.'
return prompt
def schema_linking_prompt_maker(test_sample_text,database):
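    # Builds the schema-linking prompt: few-shot examples first, then the
    # target database schema and foreign keys, then the question; the
    # completion is expected to end with a "Schema_links: [...]" line that is
    # parsed in the main loop below.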
instruction = "# Find the schema_links for generating SQL queries for each question based on the database schema and Foreign keys.\n"
fields = find_fields_MYSQL_like(database)
foreign_keys = "Foreign_keys = " + find_foreign_keys_MYSQL_like(database) + '\n'
prompt = instruction + schema_linking_prompt + fields +foreign_keys+ 'Q: "' + test_sample_text + """"\nA: Let’s think step by step."""
return prompt
def find_foreign_keys_MYSQL_like(db_name):
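    # Serialise the foreign keys of db_name as "[a.x = b.y,...]", the format
    # used throughout the few-shot prompts.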
df = spider_foreign[spider_foreign['Database name'] == db_name]
output = "["
for index, row in df.iterrows():
output += row['First Table Name'] + '.' + row['First Table Foreign Key'] + " = " + row['Second Table Name'] + '.' + row['Second Table Foreign Key'] + ','
output= output[:-1] + "]"
return output
def find_fields_MYSQL_like(db_name):
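    # Render every table of db_name as "Table <name>, columns = [...]", one
    # table per line, matching the schema blocks in the few-shot prompts.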
df = spider_schema[spider_schema['Database name'] == db_name]
df = df.groupby(' Table Name')
output = ""
for name, group in df:
output += "Table " +name+ ', columns = ['
for index, row in group.iterrows():
output += row[" Field Name"]+','
output = output[:-1]
output += "]\n"
return output
def find_primary_keys_MYSQL_like(db_name):
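    # Serialise the primary keys of db_name as "[table.key,...]"; only used by
    # the debugging prompt below.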
df = spider_primary[spider_primary['Database name'] == db_name]
output = "["
for index, row in df.iterrows():
output += row['Table Name'] + '.' + row['Primary Key'] +','
output = output[:-1]
output += "]\n"
return output
def creatiing_schema(DATASET_JSON):
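    # Flatten a Spider-style tables.json into three DataFrames: one row per
    # column (plus a '*' row per table), one per primary key and one per
    # foreign key, all keyed by database name.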
schema_df = pd.read_json(DATASET_JSON)
schema_df = schema_df.drop(['column_names','table_names'], axis=1)
schema = []
f_keys = []
p_keys = []
for index, row in schema_df.iterrows():
tables = row['table_names_original']
col_names = row['column_names_original']
col_types = row['column_types']
foreign_keys = row['foreign_keys']
primary_keys = row['primary_keys']
for col, col_type in zip(col_names, col_types):
index, col_name = col
if index == -1:
for table in tables:
schema.append([row['db_id'], table, '*', 'text'])
else:
schema.append([row['db_id'], tables[index], col_name, col_type])
for primary_key in primary_keys:
index, column = col_names[primary_key]
p_keys.append([row['db_id'], tables[index], column])
for foreign_key in foreign_keys:
first, second = foreign_key
first_index, first_column = col_names[first]
second_index, second_column = col_names[second]
f_keys.append([row['db_id'], tables[first_index], tables[second_index], first_column, second_column])
spider_schema = pd.DataFrame(schema, columns=['Database name', ' Table Name', ' Field Name', ' Type'])
spider_primary = pd.DataFrame(p_keys, columns=['Database name', 'Table Name', 'Primary Key'])
spider_foreign = pd.DataFrame(f_keys,
columns=['Database name', 'First Table Name', 'Second Table Name', 'First Table Foreign Key',
'Second Table Foreign Key'])
return spider_schema,spider_primary,spider_foreign
def debuger(test_sample_text,database,sql):
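    # Self-correction prompt: the target schema, foreign and primary keys, the
    # fixing rules below, the question and the candidate SQL; the completion
    # after "SELECT" is taken as the corrected query.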
instruction = """#### For the given question, use the provided tables, columns, foreign keys, and primary keys to fix the given SQLite SQL QUERY for any issues. If there are any problems, fix them. If there are no issues, return the SQLite SQL QUERY as is.
#### Use the following instructions for fixing the SQL QUERY:
1) Use the database values that are explicitly mentioned in the question.
2) Pay attention to the columns that are used for the JOIN by using the Foreign_keys.
3) Use DESC and DISTINCT when needed.
4) Pay attention to the columns that are used for the GROUP BY statement.
5) Pay attention to the columns that are used for the SELECT statement.
6) Only change the GROUP BY clause when necessary (Avoid redundant columns in GROUP BY).
7) Use GROUP BY on one column only.
"""
fields = find_fields_MYSQL_like(database)
fields += "Foreign_keys = " + find_foreign_keys_MYSQL_like(database) + '\n'
fields += "Primary_keys = " + find_primary_keys_MYSQL_like(database)
prompt = instruction + fields+ '#### Question: ' + test_sample_text + '\n#### SQLite SQL QUERY\n' + sql +'\n#### SQLite FIXED SQL QUERY\nSELECT'
return prompt
def GPT4_generation(prompt):
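    # Single deterministic chat completion (temperature 0); generation stops at
    # the next "Q:" so only one worked example is produced.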
response = openai.ChatCompletion.create(
model="gpt-4",
messages=[{"role": "user", "content": prompt}],
n = 1,
stream = False,
temperature=0.0,
max_tokens=600,
top_p = 1.0,
frequency_penalty=0.0,
presence_penalty=0.0,
stop = ["Q:"]
)
return response['choices'][0]['message']['content']
def GPT4_debug(prompt):
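    # Same call with a smaller token budget and stop tokens ("#", ";", blank
    # line) suited to returning a single SQL statement.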
response = openai.ChatCompletion.create(
model="gpt-4",
messages=[{"role": "user", "content": prompt}],
n = 1,
stream = False,
temperature=0.0,
max_tokens=350,
top_p = 1.0,
frequency_penalty=0.0,
presence_penalty=0.0,
stop = ["#", ";","\n\n"]
)
return response['choices'][0]['message']['content']
if __name__ == '__main__':
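    # End-to-end pipeline over the dev split (dev.json): schema linking ->
    # difficulty classification -> SQL generation with the prompt matching the
    # predicted difficulty -> self-correction pass, retrying each API call
    # after a short sleep on failure.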
spider_schema,spider_primary,spider_foreign = creatiing_schema(DATASET_SCHEMA)
val_df = load_data(DATASET)
print(f"Number of data samples {val_df.shape[0]}")
CODEX = []
for index, row in val_df.iterrows():
#if index < 405: continue #for testing
print(f"index is {index}")
print(row['query'])
print(row['question'])
schema_links = None
while schema_links is None:
try:
schema_links = GPT4_generation(
schema_linking_prompt_maker(row['question'], row['db_id']))
except:
time.sleep(3)
pass
try:
schema_links = schema_links.split("Schema_links: ")[1]
except:
print("Slicing error for the schema_linking module")
schema_links = "[]"
#print(schema_links)
classification = None
while classification is None:
try:
classification = GPT4_generation(
classification_prompt_maker(row['question'], row['db_id'], schema_links[1:]))
except:
time.sleep(3)
pass
try:
predicted_class = classification.split("Label: ")[1]
except:
print("Slicing error for the classification module")
predicted_class = '"NESTED"'
#print(classification)
if '"EASY"' in predicted_class:
print("EASY")
SQL = None
while SQL is None:
try:
SQL = GPT4_generation(easy_prompt_maker(row['question'], row['db_id'], schema_links))
except:
time.sleep(3)
pass
elif '"NON-NESTED"' in predicted_class:
print("NON-NESTED")
SQL = None
while SQL is None:
try:
SQL = GPT4_generation(medium_prompt_maker(row['question'], row['db_id'], schema_links))
except:
time.sleep(3)
pass
try:
SQL = SQL.split("SQL: ")[1]
except:
print("SQL slicing error")
SQL = "SELECT"
else:
sub_questions = classification.split('questions = ["')[1].split('"]')[0]
print("NESTED")
SQL = None
while SQL is None:
try:
SQL = GPT4_generation(
hard_prompt_maker(row['question'], row['db_id'], schema_links, sub_questions))
except:
time.sleep(3)
pass
try:
SQL = SQL.split("SQL: ")[1]
except:
print("SQL slicing error")
SQL = "SELECT"
print(SQL)
debugged_SQL = None
while debugged_SQL is None:
try:
debugged_SQL = GPT4_debug(debuger(row['question'], row['db_id'], SQL)).replace("\n", " ")
except:
time.sleep(3)
pass
SQL = "SELECT " + debugged_SQL
print(SQL)
CODEX.append([row['question'], SQL, row['query'], row['db_id']])
#break
df = pd.DataFrame(CODEX, columns=['NLQ', 'PREDICTED SQL', 'GOLD SQL', 'DATABASE'])
results = df['PREDICTED SQL'].tolist()
with open(OUTPUT_FILE, 'w') as f:
for line in results:
f.write(f"{line}\n") | [
"Q: \"Find the buildings which have rooms with capacity more than 50.\"\nschema_links: [classroom.building,classroom.capacity,50]\nA: Let’s think step by step. The SQL query for the question \"Find the buildings which have rooms with capacity more than 50.\" needs these tables = [classroom], so we don't need JOIN.\nPlus, it doesn't require nested queries with (INTERSECT, UNION, EXCEPT, IN, NOT IN), and we need the answer to the questions = [\"\"].\nSo, we don't need JOIN and don't need nested queries, then the the SQL query can be classified as \"EASY\".\nLabel: \"EASY\"\n\nQ: \"What are the names of all instructors who advise students in the math depart sorted by total credits of the student.\"\nschema_links: [advisor.i_id = instructor.id,advisor.s_id = student.id,instructor.name,student.dept_name,student.tot_cred,math]\nA: Let’s think step by step. The SQL query for the question \"What are the names of all instructors who advise students in the math depart sorted by total credits of the student.\" needs these tables = [advisor,instructor,student], so we need JOIN.\nPlus, it doesn't need nested queries with (INTERSECT, UNION, EXCEPT, IN, NOT IN), and we need the answer to the questions = [\"\"].\nSo, we need JOIN and don't need nested queries, then the the SQL query can be classified as \"NON-NESTED\".\nLabel: \"NON-NESTED\"\n\nQ: \"Find the room number of the rooms which can sit 50 to 100 students and their buildings.\"\nschema_links: [classroom.building,classroom.room_number,classroom.capacity,50,100]\nA: Let’s think step by step. The SQL query for the question \"Find the room number of the rooms which can sit 50 to 100 students and their buildings.\" needs these tables = [classroom], so we don't need JOIN.\nPlus, it doesn't require nested queries with (INTERSECT, UNION, EXCEPT, IN, NOT IN), and we need the answer to the questions = [\"\"].\nSo, we don't need JOIN and don't need nested queries, then the the SQL query can be classified as \"EASY\".\nLabel: \"EASY\"\n\nQ: \"How many courses that do not have prerequisite?\"\nschema_links: [course.*,course.course_id = prereq.course_id]\nA: Let’s think step by step. The SQL query for the question \"How many courses that do not have prerequisite?\" needs these tables = [course,prereq], so we need JOIN.\nPlus, it requires nested queries with (INTERSECT, UNION, EXCEPT, IN, NOT IN), and we need the answer to the questions = [\"Which courses have prerequisite?\"].\nSo, we need JOIN and need nested queries, then the the SQL query can be classified as \"NESTED\".\nLabel: \"NESTED\"\n\nQ: \"Find the title of course that is provided by both Statistics and Psychology departments.\"\nschema_links: [course.title,course.dept_name,Statistics,Psychology]\nA: Let’s think step by step. The SQL query for the question \"Find the title of course that is provided by both Statistics and Psychology departments.\" needs these tables = [course], so we don't need JOIN.\nPlus, it requires nested queries with (INTERSECT, UNION, EXCEPT, IN, NOT IN), and we need the answer to the questions = [\"Find the titles of courses that is provided by Psychology departments\"].\nSo, we don't need JOIN and need nested queries, then the the SQL query can be classified as \"NESTED\".\nLabel: \"NESTED\"\n\nQ: \"Find the id of instructors who taught a class in Fall 2009 but not in Spring 2010.\"\nschema_links: [teaches.id,teaches.semester,teaches.year,Fall,2009,Spring,2010]\nA: Let’s think step by step. 
The SQL query for the question \"Find the id of instructors who taught a class in Fall 2009 but not in Spring 2010.\" needs these tables = [teaches], so we don't need JOIN.\nPlus, it requires nested queries with (INTERSECT, UNION, EXCEPT, IN, NOT IN), and we need the answer to the questions = [\"Find the id of instructors who taught a class in Spring 2010\"].\nSo, we don't need JOIN and need nested queries, then the the SQL query can be classified as \"NESTED\".\nLabel: \"NESTED\"\n\nQ: \"Find the name of the department that offers the highest total credits?\"\nschema_links: [course.dept_name,course.credits]\nA: Let’s think step by step. The SQL query for the question \"Find the name of the department that offers the highest total credits?.\" needs these tables = [course], so we don't need JOIN.\nPlus, it doesn't require nested queries with (INTERSECT, UNION, EXCEPT, IN, NOT IN), and we need the answer to the questions = [\"\"].\nSo, we don't need JOIN and don't need nested queries, then the the SQL query can be classified as \"EASY\".\nLabel: \"EASY\"\n\nQ: \"What is the name of the instructor who advises the student with the greatest number of total credits?\"\nschema_links: [advisor.i_id = instructor.id,advisor.s_id = student.id,instructor.name,student.tot_cred ]\nA: Let’s think step by step. The SQL query for the question \"What is the name of the instructor who advises the student with the greatest number of total credits?\" needs these tables = [advisor,instructor,student], so we need JOIN.\nPlus, it doesn't need nested queries with (INTERSECT, UNION, EXCEPT, IN, NOT IN), and we need the answer to the questions = [\"\"].\nSo, we need JOIN and don't need nested queries, then the the SQL query can be classified as \"NON-NESTED\".\nLabel: \"NON-NESTED\"\n\nQ: \"Find the total number of students and total number of instructors for each department.\"\nschema_links = [department.dept_name = instructor.dept_name,student.id,student.dept_name = department.dept_name,instructor.id]\nA: Let’s think step by step. The SQL query for the question \"Find the total number of students and total number of instructors for each department.\" needs these tables = [department,instructor,student], so we need JOIN.\nPlus, it doesn't need nested queries with (INTERSECT, UNION, EXCEPT, IN, NOT IN), and we need the answer to the questions = [\"\"].\nSo, we need JOIN and don't need nested queries, then the the SQL query can be classified as \"NON-NESTED\".\nLabel: \"NON-NESTED\"\n\nQ: \"Give the name and building of the departments with greater than average budget.\"\nschema_links: [department.budget,department.dept_name,department.building]\nA: Let’s think step by step. The SQL query for the question \"Give the name and building of the departments with greater than average budget.\" needs these tables = [department], so we don't need JOIN.\nPlus, it requires nested queries with (INTERSECT, UNION, EXCEPT, IN, NOT IN), and we need the answer to the questions = [\"What is the average budget of the departments\"].\nSo, we don't need JOIN and need nested queries, then the the SQL query can be classified as \"NESTED\".\nLabel: \"NESTED\"\n\n",
"Table advisor, columns = [*,s_ID,i_ID]\nTable classroom, columns = [*,building,room_number,capacity]\nTable course, columns = [*,course_id,title,dept_name,credits]\nTable department, columns = [*,dept_name,building,budget]\nTable instructor, columns = [*,ID,name,dept_name,salary]\nTable prereq, columns = [*,course_id,prereq_id]\nTable section, columns = [*,course_id,sec_id,semester,year,building,room_number,time_slot_id]\nTable student, columns = [*,ID,name,dept_name,tot_cred]\nTable takes, columns = [*,ID,course_id,sec_id,semester,year,grade]\nTable teaches, columns = [*,ID,course_id,sec_id,semester,year]\nTable time_slot, columns = [*,time_slot_id,day,start_hr,start_min,end_hr,end_min]\nForeign_keys = [course.dept_name = department.dept_name,instructor.dept_name = department.dept_name,section.building = classroom.building,section.room_number = classroom.room_number,section.course_id = course.course_id,teaches.ID = instructor.ID,teaches.course_id = section.course_id,teaches.sec_id = section.sec_id,teaches.semester = section.semester,teaches.year = section.year,student.dept_name = department.dept_name,takes.ID = student.ID,takes.course_id = section.course_id,takes.sec_id = section.sec_id,takes.semester = section.semester,takes.year = section.year,advisor.s_ID = student.ID,advisor.i_ID = instructor.ID,prereq.prereq_id = course.course_id,prereq.course_id = course.course_id]\nQ: \"Find the buildings which have rooms with capacity more than 50.\"\nA: Let’s think step by step. In the question \"Find the buildings which have rooms with capacity more than 50.\", we are asked:\n\"the buildings which have rooms\" so we need column = [classroom.capacity]\n\"rooms with capacity\" so we need column = [classroom.building]\nBased on the columns and tables, we need these Foreign_keys = [].\nBased on the tables, columns, and Foreign_keys, The set of possible cell values are = [50]. So the Schema_links are:\nSchema_links: [classroom.building,classroom.capacity,50]\n\nTable department, columns = [*,Department_ID,Name,Creation,Ranking,Budget_in_Billions,Num_Employees]\nTable head, columns = [*,head_ID,name,born_state,age]\nTable management, columns = [*,department_ID,head_ID,temporary_acting]\nForeign_keys = [management.head_ID = head.head_ID,management.department_ID = department.Department_ID]\nQ: \"How many heads of the departments are older than 56 ?\"\nA: Let’s think step by step. In the question \"How many heads of the departments are older than 56 ?\", we are asked:\n\"How many heads of the departments\" so we need column = [head.*]\n\"older\" so we need column = [head.age]\nBased on the columns and tables, we need these Foreign_keys = [].\nBased on the tables, columns, and Foreign_keys, The set of possible cell values are = [56]. So the Schema_links are:\nSchema_links: [head.*,head.age,56]\n\nTable department, columns = [*,Department_ID,Name,Creation,Ranking,Budget_in_Billions,Num_Employees]\nTable head, columns = [*,head_ID,name,born_state,age]\nTable management, columns = [*,department_ID,head_ID,temporary_acting]\nForeign_keys = [management.head_ID = head.head_ID,management.department_ID = department.Department_ID]\nQ: \"what are the distinct creation years of the departments managed by a secretary born in state 'Alabama'?\"\nA: Let’s think step by step. 
In the question \"what are the distinct creation years of the departments managed by a secretary born in state 'Alabama'?\", we are asked:\n\"distinct creation years of the departments\" so we need column = [department.Creation]\n\"departments managed by\" so we need column = [management.department_ID]\n\"born in\" so we need column = [head.born_state]\nBased on the columns and tables, we need these Foreign_keys = [department.Department_ID = management.department_ID,management.head_ID = head.head_ID].\nBased on the tables, columns, and Foreign_keys, The set of possible cell values are = ['Alabama']. So the Schema_links are:\nSchema_links: [department.Creation,department.Department_ID = management.department_ID,head.head_ID = management.head_ID,head.born_state,'Alabama']\n\nTable Addresses, columns = [*,address_id,line_1,line_2,city,zip_postcode,state_province_county,country]\nTable Candidate_Assessments, columns = [*,candidate_id,qualification,assessment_date,asessment_outcome_code]\nTable Candidates, columns = [*,candidate_id,candidate_details]\nTable Courses, columns = [*,course_id,course_name,course_description,other_details]\nTable People, columns = [*,person_id,first_name,middle_name,last_name,cell_mobile_number,email_address,login_name,password]\nTable People_Addresses, columns = [*,person_address_id,person_id,address_id,date_from,date_to]\nTable Student_Course_Attendance, columns = [*,student_id,course_id,date_of_attendance]\nTable Student_Course_Registrations, columns = [*,student_id,course_id,registration_date]\nTable Students, columns = [*,student_id,student_details]\nForeign_keys = [Students.student_id = People.person_id,People_Addresses.address_id = Addresses.address_id,People_Addresses.person_id = People.person_id,Student_Course_Registrations.course_id = Courses.course_id,Student_Course_Registrations.student_id = Students.student_id,Student_Course_Attendance.student_id = Student_Course_Registrations.student_id,Student_Course_Attendance.course_id = Student_Course_Registrations.course_id,Candidates.candidate_id = People.person_id,Candidate_Assessments.candidate_id = Candidates.candidate_id]\nQ: \"List the id of students who never attends courses?\"\nA: Let’s think step by step. In the question \"List the id of students who never attends courses?\", we are asked:\n\"id of students\" so we need column = [Students.student_id]\n\"never attends courses\" so we need column = [Student_Course_Attendance.student_id]\nBased on the columns and tables, we need these Foreign_keys = [Students.student_id = Student_Course_Attendance.student_id].\nBased on the tables, columns, and Foreign_keys, The set of possible cell values are = []. 
So the Schema_links are:\nSchema_links: [Students.student_id = Student_Course_Attendance.student_id]\n\nTable Country, columns = [*,id,name]\nTable League, columns = [*,id,country_id,name]\nTable Player, columns = [*,id,player_api_id,player_name,player_fifa_api_id,birthday,height,weight]\nTable Player_Attributes, columns = [*,id,player_fifa_api_id,player_api_id,date,overall_rating,potential,preferred_foot,attacking_work_rate,defensive_work_rate,crossing,finishing,heading_accuracy,short_passing,volleys,dribbling,curve,free_kick_accuracy,long_passing,ball_control,acceleration,sprint_speed,agility,reactions,balance,shot_power,jumping,stamina,strength,long_shots,aggression,interceptions,positioning,vision,penalties,marking,standing_tackle,sliding_tackle,gk_diving,gk_handling,gk_kicking,gk_positioning,gk_reflexes]\nTable Team, columns = [*,id,team_api_id,team_fifa_api_id,team_long_name,team_short_name]\nTable Team_Attributes, columns = [*,id,team_fifa_api_id,team_api_id,date,buildUpPlaySpeed,buildUpPlaySpeedClass,buildUpPlayDribbling,buildUpPlayDribblingClass,buildUpPlayPassing,buildUpPlayPassingClass,buildUpPlayPositioningClass,chanceCreationPassing,chanceCreationPassingClass,chanceCreationCrossing,chanceCreationCrossingClass,chanceCreationShooting,chanceCreationShootingClass,chanceCreationPositioningClass,defencePressure,defencePressureClass,defenceAggression,defenceAggressionClass,defenceTeamWidth,defenceTeamWidthClass,defenceDefenderLineClass]\nTable sqlite_sequence, columns = [*,name,seq]\nForeign_keys = [Player_Attributes.player_api_id = Player.player_api_id,Player_Attributes.player_fifa_api_id = Player.player_fifa_api_id,League.country_id = Country.id,Team_Attributes.team_api_id = Team.team_api_id,Team_Attributes.team_fifa_api_id = Team.team_fifa_api_id]\nQ: \"List the names of all left-footed players who have overall rating between 85 and 90.\"\nA: Let’s think step by step. In the question \"List the names of all left-footed players who have overall rating between 85 and 90.\", we are asked:\n\"names of all left-footed players\" so we need column = [Player.player_name,Player_Attributes.preferred_foot]\n\"players who have overall rating\" so we need column = [Player_Attributes.overall_rating]\nBased on the columns and tables, we need these Foreign_keys = [Player_Attributes.player_api_id = Player.player_api_id].\nBased on the tables, columns, and Foreign_keys, The set of possible cell values are = [left,85,90]. 
So the Schema_links are:\nSchema_links: [Player.player_name,Player_Attributes.preferred_foot,Player_Attributes.overall_rating,Player_Attributes.player_api_id = Player.player_api_id,left,85,90]\n\nTable advisor, columns = [*,s_ID,i_ID]\nTable classroom, columns = [*,building,room_number,capacity]\nTable course, columns = [*,course_id,title,dept_name,credits]\nTable department, columns = [*,dept_name,building,budget]\nTable instructor, columns = [*,ID,name,dept_name,salary]\nTable prereq, columns = [*,course_id,prereq_id]\nTable section, columns = [*,course_id,sec_id,semester,year,building,room_number,time_slot_id]\nTable student, columns = [*,ID,name,dept_name,tot_cred]\nTable takes, columns = [*,ID,course_id,sec_id,semester,year,grade]\nTable teaches, columns = [*,ID,course_id,sec_id,semester,year]\nTable time_slot, columns = [*,time_slot_id,day,start_hr,start_min,end_hr,end_min]\nForeign_keys = [course.dept_name = department.dept_name,instructor.dept_name = department.dept_name,section.building = classroom.building,section.room_number = classroom.room_number,section.course_id = course.course_id,teaches.ID = instructor.ID,teaches.course_id = section.course_id,teaches.sec_id = section.sec_id,teaches.semester = section.semester,teaches.year = section.year,student.dept_name = department.dept_name,takes.ID = student.ID,takes.course_id = section.course_id,takes.sec_id = section.sec_id,takes.semester = section.semester,takes.year = section.year,advisor.s_ID = student.ID,advisor.i_ID = instructor.ID,prereq.prereq_id = course.course_id,prereq.course_id = course.course_id]\nQ: \"Give the title of the course offered in Chandler during the Fall of 2010.\"\nA: Let’s think step by step. In the question \"Give the title of the course offered in Chandler during the Fall of 2010.\", we are asked:\n\"title of the course\" so we need column = [course.title]\n\"course offered in Chandler\" so we need column = [SECTION.building]\n\"during the Fall\" so we need column = [SECTION.semester]\n\"of 2010\" so we need column = [SECTION.year]\nBased on the columns and tables, we need these Foreign_keys = [course.course_id = SECTION.course_id].\nBased on the tables, columns, and Foreign_keys, The set of possible cell values are = [Chandler,Fall,2010]. So the Schema_links are:\nSchema_links: [course.title,course.course_id = SECTION.course_id,SECTION.building,SECTION.year,SECTION.semester,Chandler,Fall,2010]\n\nTable city, columns = [*,City_ID,Official_Name,Status,Area_km_2,Population,Census_Ranking]\nTable competition_record, columns = [*,Competition_ID,Farm_ID,Rank]\nTable farm, columns = [*,Farm_ID,Year,Total_Horses,Working_Horses,Total_Cattle,Oxen,Bulls,Cows,Pigs,Sheep_and_Goats]\nTable farm_competition, columns = [*,Competition_ID,Year,Theme,Host_city_ID,Hosts]\nForeign_keys = [farm_competition.Host_city_ID = city.City_ID,competition_record.Farm_ID = farm.Farm_ID,competition_record.Competition_ID = farm_competition.Competition_ID]\nQ: \"Show the status of the city that has hosted the greatest number of competitions.\"\nA: Let’s think step by step. In the question \"Show the status of the city that has hosted the greatest number of competitions.\", we are asked:\n\"the status of the city\" so we need column = [city.Status]\n\"greatest number of competitions\" so we need column = [farm_competition.*]\nBased on the columns and tables, we need these Foreign_keys = [farm_competition.Host_city_ID = city.City_ID].\nBased on the tables, columns, and Foreign_keys, The set of possible cell values are = []. 
So the Schema_links are:\nSchema_links: [city.Status,farm_competition.Host_city_ID = city.City_ID,farm_competition.*]\n\nTable advisor, columns = [*,s_ID,i_ID]\nTable classroom, columns = [*,building,room_number,capacity]\nTable course, columns = [*,course_id,title,dept_name,credits]\nTable department, columns = [*,dept_name,building,budget]\nTable instructor, columns = [*,ID,name,dept_name,salary]\nTable prereq, columns = [*,course_id,prereq_id]\nTable section, columns = [*,course_id,sec_id,semester,year,building,room_number,time_slot_id]\nTable student, columns = [*,ID,name,dept_name,tot_cred]\nTable takes, columns = [*,ID,course_id,sec_id,semester,year,grade]\nTable teaches, columns = [*,ID,course_id,sec_id,semester,year]\nTable time_slot, columns = [*,time_slot_id,day,start_hr,start_min,end_hr,end_min]\nForeign_keys = [course.dept_name = department.dept_name,instructor.dept_name = department.dept_name,section.building = classroom.building,section.room_number = classroom.room_number,section.course_id = course.course_id,teaches.ID = instructor.ID,teaches.course_id = section.course_id,teaches.sec_id = section.sec_id,teaches.semester = section.semester,teaches.year = section.year,student.dept_name = department.dept_name,takes.ID = student.ID,takes.course_id = section.course_id,takes.sec_id = section.sec_id,takes.semester = section.semester,takes.year = section.year,advisor.s_ID = student.ID,advisor.i_ID = instructor.ID,prereq.prereq_id = course.course_id,prereq.course_id = course.course_id]\nQ: \"Find the id of instructors who taught a class in Fall 2009 but not in Spring 2010.\"\nA: Let’s think step by step. In the question \"Find the id of instructors who taught a class in Fall 2009 but not in Spring 2010.\", we are asked:\n\"id of instructors who taught \" so we need column = [teaches.id]\n\"taught a class in\" so we need column = [teaches.semester,teaches.year]\nBased on the columns and tables, we need these Foreign_keys = [].\nBased on the tables, columns, and Foreign_keys, The set of possible cell values are = [Fall,2009,Spring,2010]. 
So the Schema_links are:\nschema_links: [teaches.id,teaches.semester,teaches.year,Fall,2009,Spring,2010]\n\nTable Accounts, columns = [*,account_id,customer_id,date_account_opened,account_name,other_account_details]\nTable Customers, columns = [*,customer_id,customer_first_name,customer_middle_initial,customer_last_name,gender,email_address,login_name,login_password,phone_number,town_city,state_county_province,country]\nTable Financial_Transactions, columns = [*,transaction_id,account_id,invoice_number,transaction_type,transaction_date,transaction_amount,transaction_comment,other_transaction_details]\nTable Invoice_Line_Items, columns = [*,order_item_id,invoice_number,product_id,product_title,product_quantity,product_price,derived_product_cost,derived_vat_payable,derived_total_cost]\nTable Invoices, columns = [*,invoice_number,order_id,invoice_date]\nTable Order_Items, columns = [*,order_item_id,order_id,product_id,product_quantity,other_order_item_details]\nTable Orders, columns = [*,order_id,customer_id,date_order_placed,order_details]\nTable Product_Categories, columns = [*,production_type_code,product_type_description,vat_rating]\nTable Products, columns = [*,product_id,parent_product_id,production_type_code,unit_price,product_name,product_color,product_size]\nForeign_keys = [Orders.customer_id = Customers.customer_id,Invoices.order_id = Orders.order_id,Accounts.customer_id = Customers.customer_id,Products.production_type_code = Product_Categories.production_type_code,Financial_Transactions.account_id = Accounts.account_id,Financial_Transactions.invoice_number = Invoices.invoice_number,Order_Items.order_id = Orders.order_id,Order_Items.product_id = Products.product_id,Invoice_Line_Items.product_id = Products.product_id,Invoice_Line_Items.invoice_number = Invoices.invoice_number,Invoice_Line_Items.order_item_id = Order_Items.order_item_id]\nQ: \"Show the id, the date of account opened, the account name, and other account detail for all accounts.\"\nA: Let’s think step by step. In the question \"Show the id, the date of account opened, the account name, and other account detail for all accounts.\", we are asked:\n\"the id, the date of account opened, the account name, and other account detail for all accounts.\" so we need column = [Accounts.account_id,Accounts.account_name,Accounts.other_account_details,Accounts.date_account_opened]\nBased on the columns and tables, we need these Foreign_keys = [].\nBased on the tables, columns, and Foreign_keys, The set of possible cell values are = []. So the Schema_links are:\nSchema_links: [Accounts.account_id,Accounts.account_name,Accounts.other_account_details,Accounts.date_account_opened]\n\nTable city, columns = [*,City_ID,Official_Name,Status,Area_km_2,Population,Census_Ranking]\nTable competition_record, columns = [*,Competition_ID,Farm_ID,Rank]\nTable farm, columns = [*,Farm_ID,Year,Total_Horses,Working_Horses,Total_Cattle,Oxen,Bulls,Cows,Pigs,Sheep_and_Goats]\nTable farm_competition, columns = [*,Competition_ID,Year,Theme,Host_city_ID,Hosts]\nForeign_keys = [farm_competition.Host_city_ID = city.City_ID,competition_record.Farm_ID = farm.Farm_ID,competition_record.Competition_ID = farm_competition.Competition_ID]\nQ: \"Show the status shared by cities with population bigger than 1500 and smaller than 500.\"\nA: Let’s think step by step. 
In the question \"Show the status shared by cities with population bigger than 1500 and smaller than 500.\", we are asked:\n\"the status shared by cities\" so we need column = [city.Status]\n\"cities with population\" so we need column = [city.Population]\nBased on the columns and tables, we need these Foreign_keys = [].\nBased on the tables, columns, and Foreign_keys, The set of possible cell values are = [1500,500]. So the Schema_links are:\nSchema_links: [city.Status,city.Population,1500,500]\n\n",
"Q: \"Find the buildings which have rooms with capacity more than 50.\"\nSchema_links: [classroom.building,classroom.capacity,50]\nSQL: SELECT DISTINCT building FROM classroom WHERE capacity > 50\n\nQ: \"Find the room number of the rooms which can sit 50 to 100 students and their buildings.\"\nSchema_links: [classroom.building,classroom.room_number,classroom.capacity,50,100]\nSQL: SELECT building , room_number FROM classroom WHERE capacity BETWEEN 50 AND 100\n\nQ: \"Give the name of the student in the History department with the most credits.\"\nSchema_links: [student.name,student.dept_name,student.tot_cred,History]\nSQL: SELECT name FROM student WHERE dept_name = 'History' ORDER BY tot_cred DESC LIMIT 1\n\nQ: \"Find the total budgets of the Marketing or Finance department.\"\nSchema_links: [department.budget,department.dept_name,Marketing,Finance]\nSQL: SELECT sum(budget) FROM department WHERE dept_name = 'Marketing' OR dept_name = 'Finance'\n\nQ: \"Find the department name of the instructor whose name contains 'Soisalon'.\"\nSchema_links: [instructor.dept_name,instructor.name,Soisalon]\nSQL: SELECT dept_name FROM instructor WHERE name LIKE '%Soisalon%'\n\nQ: \"What is the name of the department with the most credits?\"\nSchema_links: [course.dept_name,course.credits]\nSQL: SELECT dept_name FROM course GROUP BY dept_name ORDER BY sum(credits) DESC LIMIT 1\n\nQ: \"How many instructors teach a course in the Spring of 2010?\"\nSchema_links: [teaches.ID,teaches.semester,teaches.YEAR,Spring,2010]\nSQL: SELECT COUNT (DISTINCT ID) FROM teaches WHERE semester = 'Spring' AND YEAR = 2010\n\nQ: \"Find the name of the students and their department names sorted by their total credits in ascending order.\"\nSchema_links: [student.name,student.dept_name,student.tot_cred]\nSQL: SELECT name , dept_name FROM student ORDER BY tot_cred\n\nQ: \"Find the year which offers the largest number of courses.\"\nSchema_links: [SECTION.YEAR,SECTION.*]\nSQL: SELECT YEAR FROM SECTION GROUP BY YEAR ORDER BY count(*) DESC LIMIT 1\n\nQ: \"What are the names and average salaries for departments with average salary higher than 42000?\"\nSchema_links: [instructor.dept_name,instructor.salary,42000]\nSQL: SELECT dept_name , AVG (salary) FROM instructor GROUP BY dept_name HAVING AVG (salary) > 42000\n\nQ: \"How many rooms in each building have a capacity of over 50?\"\nSchema_links: [classroom.*,classroom.building,classroom.capacity,50]\nSQL: SELECT count(*) , building FROM classroom WHERE capacity > 50 GROUP BY building\n\nQ: \"Find the names of the top 3 departments that provide the largest amount of courses?\"\nSchema_links: [course.dept_name,course.*]\nSQL: SELECT dept_name FROM course GROUP BY dept_name ORDER BY count(*) DESC LIMIT 3\n\nQ: \"Find the maximum and average capacity among rooms in each building.\"\nSchema_links: [classroom.building,classroom.capacity]\nSQL: SELECT max(capacity) , avg(capacity) , building FROM classroom GROUP BY building\n\nQ: \"Find the title of the course that is offered by more than one department.\"\nSchema_links: [course.title]\nSQL: SELECT title FROM course GROUP BY title HAVING count(*) > 1\n\n",
"PLACEHOLDERPLACEHOLDERPLACEHOLDERQ: \"PLACEHOLDER\nSchema_links: PLACEHOLDER\nA: Let’s think step by step.",
"PLACEHOLDERPLACEHOLDER#### Question: PLACEHOLDER\n#### SQLite SQL QUERY\nPLACEHOLDER\n#### SQLite FIXED SQL QUERY\nSELECT",
"\nschema_links: ",
"PLACEHOLDERPLACEHOLDERPLACEHOLDERPLACEHOLDERQ: \"PLACEHOLDER\"\nA: Let’s think step by step.",
"PLACEHOLDERPLACEHOLDERPLACEHOLDERQ: \"PLACEHOLDER\nschema_links: PLACEHOLDER\nA: Let’s think step by step.",
"PLACEHOLDERPLACEHOLDERPLACEHOLDERQ: \"PLACEHOLDER\nSchema_links: PLACEHOLDER\nSQL:",
"Q: \"Find the total budgets of the Marketing or Finance department.\"\nSchema_links: [department.budget,department.dept_name,Marketing,Finance]\nA: Let’s think step by step. For creating the SQL for the given question, we need to join these tables = []. First, create an intermediate representation, then use it to construct the SQL query.\nIntermediate_representation: select sum(department.budget) from department where department.dept_name = \"Marketing\" or department.dept_name = \"Finance\"\nSQL: SELECT sum(budget) FROM department WHERE dept_name = 'Marketing' OR dept_name = 'Finance'\n\nQ: \"Find the name and building of the department with the highest budget.\"\nSchema_links: [department.budget,department.dept_name,department.building]\nA: Let’s think step by step. For creating the SQL for the given question, we need to join these tables = []. First, create an intermediate representation, then use it to construct the SQL query.\nIntermediate_representation: select department.dept_name , department.building from department order by department.budget desc limit 1\nSQL: SELECT dept_name , building FROM department ORDER BY budget DESC LIMIT 1\n\nQ: \"What is the name and building of the departments whose budget is more than the average budget?\"\nSchema_links: [department.budget,department.dept_name,department.building]\nA: Let’s think step by step. For creating the SQL for the given question, we need to join these tables = []. First, create an intermediate representation, then use it to construct the SQL query.\nIntermediate_representation: select department.dept_name , department.building from department where @.@ > avg ( department.budget ) \nSQL: SELECT dept_name , building FROM department WHERE budget > (SELECT avg(budget) FROM department)\n\nQ: \"Find the total number of students and total number of instructors for each department.\"\nSchema_links: [department.dept_name = student.dept_name,student.id,department.dept_name = instructor.dept_name,instructor.id]\nA: Let’s think step by step. For creating the SQL for the given question, we need to join these tables = [department,student,instructor]. First, create an intermediate representation, then use it to construct the SQL query.\nIntermediate_representation: \"select count( distinct student.ID) , count( distinct instructor.ID) , department.dept_name from department group by instructor.dept_name\nSQL: SELECT count(DISTINCT T2.id) , count(DISTINCT T3.id) , T3.dept_name FROM department AS T1 JOIN student AS T2 ON T1.dept_name = T2.dept_name JOIN instructor AS T3 ON T1.dept_name = T3.dept_name GROUP BY T3.dept_name\n\nQ: \"Find the title of courses that have two prerequisites?\"\nSchema_links: [course.title,course.course_id = prereq.course_id]\nA: Let’s think step by step. For creating the SQL for the given question, we need to join these tables = [course,prereq]. First, create an intermediate representation, then use it to construct the SQL query.\nIntermediate_representation: select course.title from course where count ( prereq.* ) = 2 group by prereq.course_id\nSQL: SELECT T1.title FROM course AS T1 JOIN prereq AS T2 ON T1.course_id = T2.course_id GROUP BY T2.course_id HAVING count(*) = 2\n\nQ: \"Find the name of students who took any class in the years of 2009 and 2010.\"\nSchema_links: [student.name,student.id = takes.id,takes.YEAR,2009,2010]\nA: Let’s think step by step. For creating the SQL for the given question, we need to join these tables = [student,takes]. 
First, create an intermediate representation, then use it to construct the SQL query.\nIntermediate_representation: select distinct student.name from student where takes.year = 2009 or takes.year = 2010\nSQL: SELECT DISTINCT T1.name FROM student AS T1 JOIN takes AS T2 ON T1.id = T2.id WHERE T2.YEAR = 2009 OR T2.YEAR = 2010\n\nQ: \"list in alphabetic order all course names and their instructors' names in year 2008.\"\nSchema_links: [course.title,course.course_id = teaches.course_id,teaches.id = instructor.id,instructor.name,teaches.year,2008]\nA: Let’s think step by step. For creating the SQL for the given question, we need to join these tables = [course,teaches,instructor]. First, create an intermediate representation, then use it to construct the SQL query.\nIntermediate_representation: select course.title , instructor.name from course where teaches.year = 2008 order by course.title asc\nSQL: SELECT T1.title , T3.name FROM course AS T1 JOIN teaches AS T2 ON T1.course_id = T2.course_id JOIN instructor AS T3 ON T2.id = T3.id WHERE T2.YEAR = 2008 ORDER BY T1.title\n\n",
"Q: \"",
"Q: \"Find the title of courses that have two prerequisites?\"\nSchema_links: [course.title,course.course_id = prereq.course_id]\nA: Let's think step by step. \"Find the title of courses that have two prerequisites?\" can be solved by knowing the answer to the following sub-question \"What are the titles for courses with two prerequisites?\".\nThe SQL query for the sub-question \"What are the titles for courses with two prerequisites?\" is SELECT T1.title FROM course AS T1 JOIN prereq AS T2 ON T1.course_id = T2.course_id GROUP BY T2.course_id HAVING count(*) = 2\nSo, the answer to the question \"Find the title of courses that have two prerequisites?\" is =\nIntermediate_representation: select course.title from course where count ( prereq.* ) = 2 group by prereq.course_id\nSQL: SELECT T1.title FROM course AS T1 JOIN prereq AS T2 ON T1.course_id = T2.course_id GROUP BY T2.course_id HAVING count(*) = 2\n\nQ: \"Find the name and building of the department with the highest budget.\"\nSchema_links: [department.dept_name,department.building,department.budget]\nA: Let's think step by step. \"Find the name and building of the department with the highest budget.\" can be solved by knowing the answer to the following sub-question \"What is the department name and corresponding building for the department with the greatest budget?\".\nThe SQL query for the sub-question \"What is the department name and corresponding building for the department with the greatest budget?\" is SELECT dept_name , building FROM department ORDER BY budget DESC LIMIT 1\nSo, the answer to the question \"Find the name and building of the department with the highest budget.\" is =\nIntermediate_representation: select department.dept_name , department.building from department order by department.budget desc limit 1\nSQL: SELECT dept_name , building FROM department ORDER BY budget DESC LIMIT 1\n\nQ: \"Find the title, credit, and department name of courses that have more than one prerequisites?\"\nSchema_links: [course.title,course.credits,course.dept_name,course.course_id = prereq.course_id]\nA: Let's think step by step. \"Find the title, credit, and department name of courses that have more than one prerequisites?\" can be solved by knowing the answer to the following sub-question \"What is the title, credit value, and department name for courses with more than one prerequisite?\".\nThe SQL query for the sub-question \"What is the title, credit value, and department name for courses with more than one prerequisite?\" is SELECT T1.title , T1.credits , T1.dept_name FROM course AS T1 JOIN prereq AS T2 ON T1.course_id = T2.course_id GROUP BY T2.course_id HAVING count(*) > 1\nSo, the answer to the question \"Find the name and building of the department with the highest budget.\" is =\nIntermediate_representation: select course.title , course.credits , course.dept_name from course where count ( prereq.* ) > 1 group by prereq.course_id \nSQL: SELECT T1.title , T1.credits , T1.dept_name FROM course AS T1 JOIN prereq AS T2 ON T1.course_id = T2.course_id GROUP BY T2.course_id HAVING count(*) > 1\n\nQ: \"Give the name and building of the departments with greater than average budget.\"\nSchema_links: [department.dept_name,department.building,department.budget]\nA: Let's think step by step. 
\"Give the name and building of the departments with greater than average budget.\" can be solved by knowing the answer to the following sub-question \"What is the average budget of departments?\".\nThe SQL query for the sub-question \"What is the average budget of departments?\" is SELECT avg(budget) FROM department\nSo, the answer to the question \"Give the name and building of the departments with greater than average budget.\" is =\nIntermediate_representation: select department.dept_name , department.building from department where @.@ > avg ( department.budget )\nSQL: SELECT dept_name , building FROM department WHERE budget > (SELECT avg(budget) FROM department)\n\nQ: \"Find the id of instructors who taught a class in Fall 2009 but not in Spring 2010.\"\nSchema_links: [teaches.id,teaches.semester,teaches.YEAR,Fall,2009,Spring,2010]\nA: Let's think step by step. \"Find the id of instructors who taught a class in Fall 2009 but not in Spring 2010.\" can be solved by knowing the answer to the following sub-question \"Find the id of instructors who taught a class in Spring 2010\".\nThe SQL query for the sub-question \"Find the id of instructors who taught a class in Spring 2010\" is SELECT id FROM teaches WHERE semester = 'Spring' AND YEAR = 2010\nSo, the answer to the question \"Find the id of instructors who taught a class in Fall 2009 but not in Spring 2010.\" is = \nIntermediate_representation: select teaches.ID from teaches where teaches.semester = \"Fall\" and teaches.year = 2009 and teaches.semester != \"Spring\" and teaches.year = 2010 \nSQL: SELECT id FROM teaches WHERE semester = 'Fall' AND YEAR = 2009 EXCEPT SELECT id FROM teaches WHERE semester = 'Spring' AND YEAR = 2010\n\nQ: \"Find the name of the courses that do not have any prerequisite?\"\nSchema_links: [course.title,course.course_id]\nA: Let's think step by step. \"Find the name of the courses that do not have any prerequisite?\" can be solved by knowing the answer to the following sub-question \"What are the courses that have any prerequisite?\".\nThe SQL query for the sub-question \"What are the courses that have any prerequisite?\" is SELECT course_id FROM prereq\nSo, the answer to the question \"Find the name of the courses that do not have any prerequisite?\" is =\nIntermediate_representation: select course.title from course where @.@ not in prereq.course_id \nSQL: SELECT title FROM course WHERE course_id NOT IN (SELECT course_id FROM prereq)\n\nQ: \"Find the salaries of all distinct instructors that are less than the largest salary.\"\nSchema_links: [instructor.salary]\nA: Let's think step by step. \"Find the salaries of all distinct instructors that are less than the largest salary.\" can be solved by knowing the answer to the following sub-question \"What is the largest salary of instructors\".\nThe SQL query for the sub-question \"What is the largest salary of instructors\" is SELECT max(salary) FROM instructor\nSo, the answer to the question \"Find the salaries of all distinct instructors that are less than the largest salary.\" is =\nIntermediate_representation: select distinct instructor.salary from instructor where @.@ < max ( instructor.salary )\nSQL: SELECT DISTINCT salary FROM instructor WHERE salary < (SELECT max(salary) FROM instructor)\n\nQ: \"Find the names of students who have taken any course in the fall semester of year 2003.\"\nSchema_links: [student.id,student.name,takes.id,takes.semester,fall,2003]\nA: Let's think step by step. 
\"Find the names of students who have taken any course in the fall semester of year 2003.\" can be solved by knowing the answer to the following sub-question \"Find the students who have taken any course in the fall semester of year 2003.\".\nThe SQL query for the sub-question \"Find the students who have taken any course in the fall semester of year 2003.\" is SELECT id FROM takes WHERE semester = 'Fall' AND YEAR = 2003\nSo, the answer to the question \"Find the names of students who have taken any course in the fall semester of year 2003.\" is =\nIntermediate_representation: select student.name from student where takes.semester = \"Fall\" and takes.year = 2003\nSQL: SELECT name FROM student WHERE id IN (SELECT id FROM takes WHERE semester = 'Fall' AND YEAR = 2003)\n\nQ: \"Find the minimum salary for the departments whose average salary is above the average payment of all instructors.\"\nSchema_links: [instructor.salary,instructor.dept_name]\nA: Let's think step by step. \"Find the minimum salary for the departments whose average salary is above the average payment of all instructors.\" can be solved by knowing the answer to the following sub-question \"What is the average payment of all instructors.\".\nThe SQL query for the sub-question \"What is the average payment of all instructors.\" is SELECT avg(salary) FROM instructor\nSo, the answer to the question \"Find the minimum salary for the departments whose average salary is above the average payment of all instructors.\" is =\nIntermediate_representation: select min(instructor.salary) , instructor.dept_name from instructor where avg ( instructor.salary ) > avg ( instructor.salary ) group by instructor.dept_name\nSQL: SELECT min(salary) , dept_name FROM instructor GROUP BY dept_name HAVING avg(salary) > (SELECT avg(salary) FROM instructor)\n\nQ: \"What is the course title of the prerequisite of course Mobile Computing?\"\nSchema_links: [course.title,course.course_id = prereq.course_id,prereq.prereq_id,course.title,Mobile Computing]\nA: Let's think step by step. \"What is the course title of the prerequisite of course Mobile Computing?\" can be solved by knowing the answer to the following sub-question \"What are the ids of the prerequisite of course Mobile Computing?\".\nThe SQL query for the sub-question \"What are the ids of the prerequisite of course Mobile Computing?\" is SSELECT T1.prereq_id FROM prereq AS T1 JOIN course AS T2 ON T1.course_id = T2.course_id WHERE T2.title = 'Mobile Computing'\nSo, the answer to the question \"What is the course title of the prerequisite of course Mobile Computing?\" is =\nIntermediate_representation: select course.title from course where @.@ in prereq.* and course.title = \"Mobile Computing\"\nSQL: SELECT title FROM course WHERE course_id IN (SELECT T1.prereq_id FROM prereq AS T1 JOIN course AS T2 ON T1.course_id = T2.course_id WHERE T2.title = 'Mobile Computing')\n\nQ: \"Give the title and credits for the course that is taught in the classroom with the greatest capacity.\"\nSchema_links: [classroom.capacity,classroom.building = SECTION.building,classroom.room_number = SECTION.room_number,course.title,course.credits,course.course_id = SECTION.course_id]\nA: Let's think step by step. 
\"Give the title and credits for the course that is taught in the classroom with the greatest capacity.\" can be solved by knowing the answer to the following sub-question \"What is the capacity of the largest room?\".\nThe SQL query for the sub-question \"What is the capacity of the largest room?\" is (SELECT max(capacity) FROM classroom)\nSo, the answer to the question \"Give the title and credits for the course that is taught in the classroom with the greatest capacity.\" is =\nIntermediate_representation: select course.title , course.credits from classroom order by classroom.capacity desc limit 1\"\nSQL: SELECT T3.title , T3.credits FROM classroom AS T1 JOIN SECTION AS T2 ON T1.building = T2.building AND T1.room_number = T2.room_number JOIN course AS T3 ON T2.course_id = T3.course_id WHERE T1.capacity = (SELECT max(capacity) FROM classroom)\n\n",
"\nThe SQL query for the sub-question\""
] |
2024-01-10 | YanJiaHuan/Text2Sql | multi_turn~Bard_GPT~V0_fewshot~V0_fewshot.py | import pandas as pd
import time
import re
import openai
import os
from os import environ
import sys
import tiktoken
import sqlite3
from Bard import Chatbot
'''
This is a few-shot prompting trial for GPT-3.5, which works as a baseline for our experiments.
Version 0_1:
1. GPT will be given a database schema and a natural language question, and it will generate the corresponding SQL query.
2. We will give GPT a few-shot prompt, which is a few SQL queries with the same database schema.
3. We will try 1-shot, 3-shot, and 5-shot (sampling from the training set) and see how GPT performs.
4. We will try 1-shot, 3-shot, and 5-shot (sampling from the test set) and see how GPT performs.
'''
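# --- Added illustrative sketch (not part of the original pipeline) ---
# One way the N-shot prompt described above could be assembled from sampled
# (question, SQL) pairs; `few_shot_pairs` and `new_question` are hypothetical names.
# few_shot_pairs = [("How many singers do we have?", "SELECT count(*) FROM singer")]
# few_shot_block = "".join("Question: " + q + "\nSQL: " + s + "\n" for q, s in few_shot_pairs)
# prompt = SQL_generation_prompt + few_shot_block + "Question: " + new_question + "\nYour SQL:"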
#################### 0. Prompt ####################
SQL_generation_prompt = '''
You are an expert in SQL. I will give you a natural language question and a database schema,
please help me generate the corresponding SQL query with no further explanation.
'''
checker_prompt = '''
Please help me generate the corresponding SQL query with no further explanation.
'''
#################### 1. Set up ####################
#----------------------------------------------------------------------------------------------------------
# API_KEY = "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" # 自己的
API_KEY = "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" # 买的
# API_KEY = "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" # gpt4 孙哥
os.environ["OPENAI_API_KEY"] = API_KEY
openai.api_key = os.getenv("OPENAI_API_KEY")
#changed
task = 'Spider' # 'CoSQL' or 'Spider'
if task == 'CoSQL':
path_to_CoSQL = "./cosql_dataset"
DATASET_SCHEMA = path_to_CoSQL+"/tables.json"
DATASET = path_to_CoSQL+"/sql_state_tracking/cosql_dev.json"
OUTPUT_FILE_1 = "./predicted_sql.txt"
OUTPUT_FILE_2 = "./gold_sql.txt"
DATABASE_PATH = path_to_CoSQL+"/database"
else:
path_to_Spider = "/Users/yan/Desktop/text2sql/spider"
DATASET_SCHEMA = path_to_Spider + "/tables.json"
DATASET = path_to_Spider + "/dev.json"
OUTPUT_FILE_1 = "./Spider/predicted_sql.txt"
OUTPUT_FILE_2 = "./Spider/gold_sql.txt"
DATABASE_PATH = path_to_Spider + "/database"
# set max tokens limit
MAX_TOKENS = 4096
model_name = "gpt-3.5-turbo"
# model_name = "gpt-4"
encoding = tiktoken.encoding_for_model(model_name)
# count the token
def num_tokens_from_string(string: str, model_name: str) -> int:
"""Returns the number of tokens in a text string."""
encoding = tiktoken.encoding_for_model(model_name)
num_tokens = len(encoding.encode(string))
return num_tokens
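# Illustrative usage (assumption: `candidate_prompt` is a prompt string built elsewhere):
# token counting can guard against the context window before calling the API.
# if num_tokens_from_string(candidate_prompt, model_name) > MAX_TOKENS:
#     candidate_prompt = shorten_prompt(candidate_prompt)  # e.g. drop few-shot examples; hypothetical helper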
# load dataset
def load_data(DATASET):
return pd.read_json(DATASET)
def find_foreign_keys_MYSQL_like(db_name):
df = spider_foreign[spider_foreign['Database name'] == db_name]
output = "["
for index, row in df.iterrows():
output += row['First Table Name'] + '.' + row['First Table Foreign Key'] + " = " + row['Second Table Name'] + '.' + row['Second Table Foreign Key'] + ','
output= output[:-1] + "]"
return output
def find_fields_MYSQL_like(db_name):
df = spider_schema[spider_schema['Database name'] == db_name]
df = df.groupby(' Table Name')
output = ""
for name, group in df:
output += "Table " +name+ ', columns = ['
for index, row in group.iterrows():
output += row[" Field Name"]+','
output = output[:-1]
output += "]\n"
return output
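# For reference, find_fields_MYSQL_like('car_1') returns text shaped like:
# Table car_makers, columns = [*,Id,Maker,FullName,Country]
# Table car_names, columns = [*,MakeId,Model,Make]
# (one "Table <name>, columns = [...]" line per table in the database)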
def find_primary_keys_MYSQL_like(db_name):
df = spider_primary[spider_primary['Database name'] == db_name]
output = "["
for index, row in df.iterrows():
output += row['Table Name'] + '.' + row['Primary Key'] +','
output = output[:-1]
output += "]\n"
return output
def creatiing_schema(DATASET_JSON):
schema_df = pd.read_json(DATASET_JSON)
schema_df = schema_df.drop(['column_names','table_names'], axis=1)
schema = []
f_keys = []
p_keys = []
for index, row in schema_df.iterrows():
tables = row['table_names_original']
col_names = row['column_names_original']
col_types = row['column_types']
foreign_keys = row['foreign_keys']
primary_keys = row['primary_keys']
for col, col_type in zip(col_names, col_types):
index, col_name = col
if index == -1:
for table in tables:
schema.append([row['db_id'], table, '*', 'text'])
else:
schema.append([row['db_id'], tables[index], col_name, col_type])
for primary_key in primary_keys:
index, column = col_names[primary_key]
p_keys.append([row['db_id'], tables[index], column])
for foreign_key in foreign_keys:
first, second = foreign_key
first_index, first_column = col_names[first]
second_index, second_column = col_names[second]
f_keys.append([row['db_id'], tables[first_index], tables[second_index], first_column, second_column])
spider_schema = pd.DataFrame(schema, columns=['Database name', ' Table Name', ' Field Name', ' Type'])
spider_primary = pd.DataFrame(p_keys, columns=['Database name', 'Table Name', 'Primary Key'])
spider_foreign = pd.DataFrame(f_keys,
columns=['Database name', 'First Table Name', 'Second Table Name', 'First Table Foreign Key',
'Second Table Foreign Key'])
return spider_schema,spider_primary,spider_foreign
def SQL_checker(sql, database):
# sql be like: "SELECT * FROM car_1 WHERE car_1.id = 1"
# database is the path to local xxx.sqlite
# the function of this part is to check if the sql is valid, if not, return the error message
path = DATABASE_PATH + '/' + database + '/' + database + '.sqlite'
try:
# Connect to the SQLite database
conn = sqlite3.connect(path)
# Create a cursor object to execute the SQL query
cursor = conn.cursor()
# Execute the SQL query
cursor.execute(sql)
# Commit the transaction and close the connection
conn.commit()
conn.close()
# Return a success message if the SQL query is valid
prompt = "The SQL query is valid in grammar."
checker = False
except sqlite3.Error as e:
# Return the error message if the SQL query is not valid
instruction = f"""#### The SQL you generated: {sql}, raised the error: {e}. Please fix the error and generate again. \n"""
fields = find_fields_MYSQL_like(database)
fields += "Foreign_keys = " + find_foreign_keys_MYSQL_like(database) + '\n'
fields += "Primary_keys = " + find_primary_keys_MYSQL_like(database)
prompt = instruction + fields + checker_prompt
checker = True
return prompt, checker
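# Illustrative usage (sketch): run the generated SQL against the local SQLite file and,
# if it fails, feed the error message back to the model for one repair attempt.
# feedback, has_error = SQL_checker(sql, db_id)
# if has_error:
#     sql, _ = GPT4_generation(feedback)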
import time
def GPT4_generation(prompt):
'''
openai.error.RateLimitError: Rate limit reached for default-gpt-3.5-turbo
in organization org-GFmlumrCZBB2Y40fVv7f8qgp on requests per min. Limit: 3 / min.
Please try again in 20s. Contact us through our help center at help.openai.com if you continue to have issues.
Please add a payment method to your account to increase your rate limit.
Visit https://platform.openai.com/account/billing to add a payment method.
'''
limit_marker = False
fake_SQL = "SELECT COUNT(*) FROM singer"
while True:
try:
response = openai.ChatCompletion.create(
model=model_name,
messages=[{"role": "user", "content": prompt}],
n = 1,
stream = False,
temperature=0.0,
max_tokens=600,
top_p = 1.0,
frequency_penalty=0.0,
presence_penalty=0.0,
)
return response['choices'][0]['message']['content'], limit_marker
except openai.error.RateLimitError as e:
print(f"RateLimitError: {e}")
print("Sleeping for 20 seconds...")
time.sleep(20)
print("Retrying...")
except Exception as e:
print(f"Unexpected error: {e}")
return fake_SQL, limit_marker
# helper to extract SQL from a markdown-fenced model response (the Bard chatbot is initialized below)
def extract_sql(response):
matches = re.findall(r'```sql\n(.*?)\n```', response, re.DOTALL)
return matches
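# Illustrative example: extract_sql pulls the query out of a markdown-fenced reply.
# extract_sql("Here you go:\n```sql\nSELECT count(*) FROM singer\n```")
# returns ['SELECT count(*) FROM singer']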
tokens=(
"WwiJN0oLURBx7gX_O8WVz9Fufj1iefdzkpt2fsbsb-e8al2Kvufapnj5mYa6vGo5P1ub9w.",
"WwhXnsbFLxozhOKG1-NUO78iif9IiN5El3Qk9yk5fi70TMcaUMOwfWwjTyqAyNe6MCtiEA.",
"Wwi1wxVyz-X2piJk8Ts84d08Fm1UmHDTOS7ftlD6LCXdbUVjFrQlJfl97an8UHhZQM8juQ.",
"Wwj6xMcUvzQUaKwcRQ-qvwrIcZLDBRp9XP25HkEVBAJDVZBzujepzI_dttehdJiCAjCIMg.",
"WwjMZ_TL9xIl4jREPppT5df6tAsjLLgjRo_GKK5iLslGOh5lMtstOMP_iJEADXq6gjFEKA.",
"Wgj-oa5yHxfmjo0lLybtWGLiWYoKTZ07NXcUiaPiUHmtQQiAKlfzNTOA9lwqmCz2N0qGFg."
)
def Bard_generation(prompt):
limit_marker = False
token_index = 0
chatbot = Chatbot(tokens[token_index])
answer = chatbot.ask(prompt)
print('whole answer', answer)
while True: # This loop will continue until a string is returned
if isinstance(answer, dict): # check if answer is a dictionary (error response)
limit_marker = True
print("Token limit reached, switching to a new token...")
token_index += 1 # Move to the next token
if token_index >= len(tokens): # If we've used all tokens, start over
token_index = 0
print("exceeding total limit, Waiting 15 seconds...")
time.sleep(15) # freeze for 15s
chatbot = Chatbot(tokens[token_index]) # Create a new chatbot with the new token
answer = chatbot.ask(prompt) # resend the request
else:
return answer[0][0], limit_marker
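# Illustrative usage (sketch): Bard_generation returns the first candidate answer plus a
# flag recording whether a Bard token had to be rotated because of a usage limit.
# answer, hit_limit = Bard_generation(message)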
def save_breaker(breaker):
with open("breaker.txt", "w") as f:
f.write(str(breaker))
# Function to load the breaker value from a file
def load_breaker():
if os.path.exists("breaker.txt"):
with open("breaker.txt", "r") as f:
breaker = int(f.read())
if breaker > 1037:
breaker = 0
else:
breaker = breaker
return breaker
return 0
if __name__ == '__main__':
###########################################################################################
# load the data
spider_schema,spider_primary,spider_foreign = creatiing_schema(DATASET_SCHEMA)
val_df = load_data(DATASET)
SQLs_temp_pred = []
SQLs_temp_gold = []
for index,sample in val_df.iterrows():
print('index:',index)
db_id = sample['db_id'] # e.g.'car_1'
question = sample['question'] # e.g.'How many car models are produced by each maker? List the count and the maker full name.'
SQL_gold = sample['query'] # e.g.'SELECT COUNT(*) FROM car_1 WHERE car_1.id = 1'
print('SQL_gold:',SQL_gold)
schema = find_fields_MYSQL_like(db_id) + '\n' + "foreign key:" + find_foreign_keys_MYSQL_like(
db_id) + '\n' + "primary key:" + find_primary_keys_MYSQL_like(db_id) #
###############################################
'''message to GPT, to get SQL'''
message_GPT = SQL_generation_prompt + \
"\ndatabase:" + db_id + \
"\ndatabase chema:" + schema + \
"Just give me the plain SQL without any placeholders." + \
"\nquestion:" + question+ \
"\nYour SQL:"
print('message to GPT3.5:', message_GPT)
SQL, limit_marker = GPT4_generation(message_GPT)
print('SQL:', SQL)
SQL = SQL.replace('\n', ' ')
print('\nGPT generated SQL:', SQL + '\n')
SQLs_temp_pred.append(SQL)
SQLs_temp_gold.append(SQL_gold+'\t'+db_id)
with open ('./predicted_sql.txt', 'a') as f:
f.write(SQL+'\n')
with open ('./gold_sql.txt', 'a') as f:
f.write(SQL_gold+'\t'+db_id+'\n')
# CUDA_VISIBLE_DEVICES=7 python read_cosql.py | [
"The SQL query is valid in grammar.",
"PLACEHOLDERPLACEHOLDERPLACEHOLDER",
"\nYou are an expert in SQL. I will give you a natural language question and a database schema, \nplease help me generate the corresponding SQL query with no further explaination.\n",
"\nPlease help me generate the corresponding SQL query with no further explaination.\n"
] |
2024-01-10 | YanJiaHuan/Text2Sql | multi_turn~GPT3_CoSQL.py | import pandas as pd
import time
import openai
import os
import sys
import tiktoken
import sqlite3
#################### 0. Prompt ####################
SQL_generation_prompt = '''
You are an expert in SQL. I will give you a natural language question and a database schema,
please help me generate the corresponding SQL query with no further explanation.
'''
three_shots_SQL_generation_prompt = '''
Here are some examples of EASY, MEDIUM and HARD SQL queries.
SELECT count(*) FROM singer
SELECT avg(weight) , pettype FROM pets GROUP BY pettype
SELECT T1.fname , T1.age FROM student AS T1 JOIN has_pet AS T2 ON T1.stuid = T2.stuid JOIN pets AS T3 ON T3.petid = T2.petid WHERE T3.pettype = 'dog' AND T1.stuid NOT IN (SELECT T1.stuid FROM student AS T1 JOIN has_pet AS T2 ON T1.stuid = T2.stuid JOIN pets AS T3 ON T3.petid = T2.petid WHERE T3.pettype = 'cat')
'''
checker_prompt = '''
Please help me generate the corresponding SQL query with no further explanation.
'''
#################### 1. Set up ####################
#----------------------------------------------------------------------------------------------------------
API_KEY = "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" # 自己的
# API_KEY = "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" # 买的
# API_KEY = "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" # gpt4 孙哥
os.environ["OPENAI_API_KEY"] = API_KEY
openai.api_key = os.getenv("OPENAI_API_KEY")
#changed
task = 'CoSQL' # 'CoSQL' or 'Spider'
if task == 'CoSQL':
path_to_CoSQL = "/Users/yan/Desktop/text2sql/cosql_dataset"
DATASET_SCHEMA = path_to_CoSQL+"/tables.json"
DATASET = path_to_CoSQL+"/sql_state_tracking/cosql_dev.json"
OUTPUT_FILE_1 = "./predicted_sql.txt"
OUTPUT_FILE_2 = "./gold_sql.txt"
DATABASE_PATH = path_to_CoSQL+"/database"
else:
path_to_Spider = "/Users/yan/Desktop/text2sql/spider"
DATASET_SCHEMA = path_to_Spider + "/tables.json"
DATASET = path_to_Spider + "/dev.json"
OUTPUT_FILE_1 = "./Spider/predicted_sql.txt"
OUTPUT_FILE_2 = "./Spider/gold_sql.txt"
DATABASE_PATH = path_to_Spider + "/database"
# set max tokens limit
MAX_TOKENS = 4096
model_name = "gpt-3.5-turbo"
# model_name = "gpt-4"
encoding = tiktoken.encoding_for_model(model_name)
# count the token
def num_tokens_from_string(string: str, model_name: str) -> int:
"""Returns the number of tokens in a text string."""
encoding = tiktoken.encoding_for_model(model_name)
num_tokens = len(encoding.encode(string))
return num_tokens
# load dataset
def load_data(DATASET):
return pd.read_json(DATASET)
def find_foreign_keys_MYSQL_like(db_name):
df = spider_foreign[spider_foreign['Database name'] == db_name]
output = "["
for index, row in df.iterrows():
output += row['First Table Name'] + '.' + row['First Table Foreign Key'] + " = " + row['Second Table Name'] + '.' + row['Second Table Foreign Key'] + ','
output= output[:-1] + "]"
return output
def find_fields_MYSQL_like(db_name):
df = spider_schema[spider_schema['Database name'] == db_name]
df = df.groupby(' Table Name')
output = ""
for name, group in df:
output += "Table " +name+ ', columns = ['
for index, row in group.iterrows():
output += row[" Field Name"]+','
output = output[:-1]
output += "]\n"
return output
def find_primary_keys_MYSQL_like(db_name):
df = spider_primary[spider_primary['Database name'] == db_name]
output = "["
for index, row in df.iterrows():
output += row['Table Name'] + '.' + row['Primary Key'] +','
output = output[:-1]
output += "]\n"
return output
def creatiing_schema(DATASET_JSON):
schema_df = pd.read_json(DATASET_JSON)
schema_df = schema_df.drop(['column_names','table_names'], axis=1)
schema = []
f_keys = []
p_keys = []
for index, row in schema_df.iterrows():
tables = row['table_names_original']
col_names = row['column_names_original']
col_types = row['column_types']
foreign_keys = row['foreign_keys']
primary_keys = row['primary_keys']
for col, col_type in zip(col_names, col_types):
index, col_name = col
if index == -1:
for table in tables:
schema.append([row['db_id'], table, '*', 'text'])
else:
schema.append([row['db_id'], tables[index], col_name, col_type])
for primary_key in primary_keys:
index, column = col_names[primary_key]
p_keys.append([row['db_id'], tables[index], column])
for foreign_key in foreign_keys:
first, second = foreign_key
first_index, first_column = col_names[first]
second_index, second_column = col_names[second]
f_keys.append([row['db_id'], tables[first_index], tables[second_index], first_column, second_column])
spider_schema = pd.DataFrame(schema, columns=['Database name', ' Table Name', ' Field Name', ' Type'])
spider_primary = pd.DataFrame(p_keys, columns=['Database name', 'Table Name', 'Primary Key'])
spider_foreign = pd.DataFrame(f_keys,
columns=['Database name', 'First Table Name', 'Second Table Name', 'First Table Foreign Key',
'Second Table Foreign Key'])
return spider_schema,spider_primary,spider_foreign
def SQL_checker(sql, database):
# sql be like: "SELECT * FROM car_1 WHERE car_1.id = 1"
# database is the path to local xxx.sqlite
# the function of this part is to check if the sql is valid, if not, return the error message
path = DATABASE_PATH + '/' + database + '/' + database + '.sqlite'
try:
# Connect to the SQLite database
conn = sqlite3.connect(path)
# Create a cursor object to execute the SQL query
cursor = conn.cursor()
# Execute the SQL query
cursor.execute(sql)
# Commit the transaction and close the connection
conn.commit()
conn.close()
# Return a success message if the SQL query is valid
prompt = "The SQL query is valid in grammar."
checker = False
except sqlite3.Error as e:
# Return the error message if the SQL query is not valid
instruction = f"""#### The SQL you generated: {sql}, raised the error: {e}. Please fix the error and generate again. \n"""
fields = find_fields_MYSQL_like(database)
fields += "Foreign_keys = " + find_foreign_keys_MYSQL_like(database) + '\n'
fields += "Primary_keys = " + find_primary_keys_MYSQL_like(database)
prompt = instruction + fields + checker_prompt
checker = True
return prompt, checker
def GPT4_generation(prompt):
limit_marker = False
try:
response = openai.ChatCompletion.create(
model=model_name,
messages=[{"role": "user", "content": prompt}],
n = 1,
stream = False,
temperature=0.0,
max_tokens=600,
top_p = 1.0,
frequency_penalty=0.0,
presence_penalty=0.0,
)
return response['choices'][0]['message']['content'], limit_marker
except openai.error.RateLimitError as e:
print(f"RateLimitError: {e}")
limit_marker = True
fake_SQL = "SELECT COUNT(*) FROM singer"
return fake_SQL,limit_marker
def save_breaker(breaker):
with open("breaker.txt", "w") as f:
f.write(str(breaker))
# Function to load the breaker value from a file
def load_breaker():
if os.path.exists("breaker.txt"):
with open("breaker.txt", "r") as f:
breaker = int(f.read())
if breaker > 1037:
breaker = 0
else:
breaker = breaker
return breaker
return 0
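# Illustrative note: breaker.txt acts as a simple checkpoint so a run interrupted by a
# rate limit can resume from the same dataset row, as done in the main loop below.
# breaker = load_breaker()   # 0 on a fresh run
# ...process val_df[breaker:]...
# save_breaker(breaker)      # persist progress for the next run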
if __name__ == '__main__':
###########################################################################################
spider_schema,spider_primary,spider_foreign = creatiing_schema(DATASET_SCHEMA)
val_df = load_data(DATASET)
CODEX = []
# test_SQL = "SELECT COUNT(*) FROM singer"
# test_db = 'concert_singer'
# print(SQL_checker(test_SQL, test_db))
breaker = load_breaker() # mark the breaker point of chatgpt
print("breaker is: ", breaker)
for index, row in val_df[breaker:].iterrows():
#if index < 405: continue #for testing
print(f"index is {index}")
print(row['query'])
print(row['question'])
question = row['question']
db_id = row['db_id']
sql = row['query']
schema = find_fields_MYSQL_like(db_id)+'\n'+"foreign key:"+find_foreign_keys_MYSQL_like(db_id)+'\n'+"primary key:"+find_primary_keys_MYSQL_like(db_id)
# print(schema)
message = SQL_generation_prompt + "Question:"+question + "\ndatabase:"+ db_id + "\ndatabase schema:"+schema+ three_shots_SQL_generation_prompt
# print(message)
SQL,limit_marker = GPT4_generation(message)
if limit_marker:
print("break at index: ", breaker)
break
else:
result_message,checker = SQL_checker(SQL, db_id)
if checker:
print(result_message)
SQL,_ = GPT4_generation(result_message)
else:
print(result_message)
SQL = SQL.replace('\n', ' ')
breaker += 1
CODEX.append([row['question'], SQL, row['query'], row['db_id']])
# break
df = pd.DataFrame(CODEX, columns=['NLQ', 'PREDICTED SQL', 'GOLD SQL', 'DATABASE'])
results = df['PREDICTED SQL'].tolist()
with open(OUTPUT_FILE_1, 'a') as f:
for line in results:
f.write(f"{line}\n")
task = 'CoSQL'
if task == 'CoSQL':
dataset = pd.read_json(DATASET)
gold = []
for index, row in dataset[:index].iterrows():
dict_round = {}
dict_round['query'] = row['interaction'][0]['query']
dict_round['db_id'] = row['database_id']
gold.append(dict_round)
else:
dataset = pd.read_json(DATASET)
gold = []
for index, row in dataset[:index].iterrows():
dict_round = {}
dict_round['query'] = row['query']
dict_round['db_id'] = row['db_id']
gold.append(dict_round)
with open(OUTPUT_FILE_2, 'a') as f:
for item in gold:
f.write(f"{item['query']}\t{item['db_id']}\n")
save_breaker(breaker) | [
"PLACEHOLDERPLACEHOLDERPLACEHOLDER",
"\nPlease help me generate the corresponding SQL query with no further explaination.\n",
"The SQL query is valid in grammar.",
"\nYou are an expert in SQL. I will give you a natural language question and a database schema, \nplease help me generate the corresponding SQL query with no further explaination.\n",
"\nHere is some examples of EASY, MEDIUM and HARD SQL queries.\nSELECT count(*) FROM singer \nSELECT avg(weight) , pettype FROM pets GROUP BY pettype\nSELECT T1.fname , T1.age FROM student AS T1 JOIN has_pet AS T2 ON T1.stuid = T2.stuid JOIN pets AS T3 ON T3.petid = T2.petid WHERE T3.pettype = 'dog' AND T1.stuid NOT IN (SELECT T1.stuid FROM student AS T1 JOIN has_pet AS T2 ON T1.stuid = T2.stuid JOIN pets AS T3 ON T3.petid = T2.petid WHERE T3.pettype = 'cat')\n"
] |
2024-01-10 | YanJiaHuan/Text2Sql | multi_turn~read_cosql.py | import pandas as pd
import time
import openai
import os
import sys
import tiktoken
import sqlite3
from Bard import Chatbot
#################### 0. Prompt ####################
SQL_generation_prompt = '''
You are an expert in SQL. I will give you a natural language question and a database schema,
please help me generate the corresponding SQL query with no further explanation.
'''
three_shots_SQL_generation_prompt = '''
Here are some examples of EASY, MEDIUM and HARD SQL queries.
SELECT count(*) FROM singer
SELECT avg(weight) , pettype FROM pets GROUP BY pettype
SELECT T1.fname , T1.age FROM student AS T1 JOIN has_pet AS T2 ON T1.stuid = T2.stuid JOIN pets AS T3 ON T3.petid = T2.petid WHERE T3.pettype = 'dog' AND T1.stuid NOT IN (SELECT T1.stuid FROM student AS T1 JOIN has_pet AS T2 ON T1.stuid = T2.stuid JOIN pets AS T3 ON T3.petid = T2.petid WHERE T3.pettype = 'cat')
'''
zero_shots_SQL_generation_prompt = '''
Sorry, I won't give you any examples. Please generate based on your own semantic parsing ability.
'''
one_shot_Cosql_prompt_without_explain = '''
Here is a sample of multi-turn text2sql for you to understand the task.
Table advisor, columns = [*,s_ID,i_ID]
Table classroom, columns = [*,building,room_number,capacity]
Table course, columns = [*,course_id,title,dept_name,credits]
Table department, columns = [*,dept_name,building,budget]
Table instructor, columns = [*,ID,name,dept_name,salary]
Table prereq, columns = [*,course_id,prereq_id]
Table section, columns = [*,course_id,sec_id,semester,year,building,room_number,time_slot_id]
Table student, columns = [*,ID,name,dept_name,tot_cred]
Table takes, columns = [*,ID,course_id,sec_id,semester,year,grade]
Table teaches, columns = [*,ID,course_id,sec_id,semester,year]
Table time_slot, columns = [*,time_slot_id,day,start_hr,start_min,end_hr,end_min]
foreign key:[course.dept_name = department.dept_name,instructor.dept_name = department.dept_name,section.building = classroom.building,section.room_number = classroom.room_number,section.course_id = course.course_id,teaches.ID = instructor.ID,teaches.course_id = section.course_id,teaches.sec_id = section.sec_id,teaches.semester = section.semester,teaches.year = section.year,student.dept_name = department.dept_name,takes.ID = student.ID,takes.course_id = section.course_id,takes.sec_id = section.sec_id,takes.semester = section.semester,takes.year = section.year,advisor.s_ID = student.ID,advisor.i_ID = instructor.ID,prereq.prereq_id = course.course_id,prereq.course_id = course.course_id]
primary key:[classroom.building,department.dept_name,course.course_id,instructor.ID,section.course_id,teaches.ID,student.ID,takes.ID,advisor.s_ID,time_slot.time_slot_id,prereq.course_id]
Iteration 1:
Question: Find out the average salary of professors?
SELECT avg ( salary ) FROM instructor
Iteration 2: # iteration 2 will see the question and sql in iteration 1
Question: Find the average salary of the professors of each department?
SELECT avg ( salary ) , dept_name FROM instructor GROUP BY dept_name
Iteration 3: # iteration 3 will see the questions and SQLs in iterations 2 and 1
Question: Which department has the highest average salary of professors?
SELECT dept_name FROM instructor GROUP BY dept_name ORDER BY avg ( salary ) DESC LIMIT 1
Question: show the train name and station name for each train.
'''
checker_prompt = '''
Please help me generate the corresponding SQL query with no further explanation.
'''
Contextual_prompt = '''
Now I will give you some context (question and your own answer). Please generate the corresponding SQL query with no further explanation.
'''
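# Illustrative sketch of how this contextual prompt is used in the main loop below:
# after round 0, each request prepends the previous question and the model's own SQL
# before asking the next question, roughly:
# message = old_message + Contextual_prompt + \
#     "\nThis is previous question:" + history['question'] + \
#     "\nThis is your previously generated SQL:" + history['query'] + \
#     "\nQuestion:" + question_round + "\nOutput:"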
#################### 1. Set up ####################
#----------------------------------------------------------------------------------------------------------
# API_KEY = "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" # 自己的
API_KEY = "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" # 买的
# API_KEY = "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" # gpt4 孙哥
os.environ["OPENAI_API_KEY"] = API_KEY
openai.api_key = os.getenv("OPENAI_API_KEY")
#changed
task = 'CoSQL' # 'CoSQL' or 'Spider'
if task == 'CoSQL':
path_to_CoSQL = "./cosql_dataset"
DATASET_SCHEMA = path_to_CoSQL+"/tables.json"
DATASET = path_to_CoSQL+"/sql_state_tracking/cosql_dev.json"
OUTPUT_FILE_1 = "./predicted_sql.txt"
OUTPUT_FILE_2 = "./gold_sql.txt"
DATABASE_PATH = path_to_CoSQL+"/database"
else:
path_to_Spider = "/Users/yan/Desktop/text2sql/spider"
DATASET_SCHEMA = path_to_Spider + "/tables.json"
DATASET = path_to_Spider + "/dev.json"
OUTPUT_FILE_1 = "./Spider/predicted_sql.txt"
OUTPUT_FILE_2 = "./Spider/gold_sql.txt"
DATABASE_PATH = path_to_Spider + "/database"
# set max tokens limit
MAX_TOKENS = 4096
model_name = "gpt-3.5-turbo"
# model_name = "gpt-4"
encoding = tiktoken.encoding_for_model(model_name)
# count the token
def num_tokens_from_string(string: str, model_name: str) -> int:
"""Returns the number of tokens in a text string."""
encoding = tiktoken.encoding_for_model(model_name)
num_tokens = len(encoding.encode(string))
return num_tokens
# load dataset
def load_data(DATASET):
return pd.read_json(DATASET)
def find_foreign_keys_MYSQL_like(db_name):
df = spider_foreign[spider_foreign['Database name'] == db_name]
output = "["
for index, row in df.iterrows():
output += row['First Table Name'] + '.' + row['First Table Foreign Key'] + " = " + row['Second Table Name'] + '.' + row['Second Table Foreign Key'] + ','
output= output[:-1] + "]"
return output
def find_fields_MYSQL_like(db_name):
df = spider_schema[spider_schema['Database name'] == db_name]
df = df.groupby(' Table Name')
output = ""
for name, group in df:
output += "Table " +name+ ', columns = ['
for index, row in group.iterrows():
output += row[" Field Name"]+','
output = output[:-1]
output += "]\n"
return output
def find_primary_keys_MYSQL_like(db_name):
df = spider_primary[spider_primary['Database name'] == db_name]
output = "["
for index, row in df.iterrows():
output += row['Table Name'] + '.' + row['Primary Key'] +','
output = output[:-1]
output += "]\n"
return output
def creatiing_schema(DATASET_JSON):
schema_df = pd.read_json(DATASET_JSON)
schema_df = schema_df.drop(['column_names','table_names'], axis=1)
schema = []
f_keys = []
p_keys = []
for index, row in schema_df.iterrows():
tables = row['table_names_original']
col_names = row['column_names_original']
col_types = row['column_types']
foreign_keys = row['foreign_keys']
primary_keys = row['primary_keys']
for col, col_type in zip(col_names, col_types):
index, col_name = col
if index == -1:
for table in tables:
schema.append([row['db_id'], table, '*', 'text'])
else:
schema.append([row['db_id'], tables[index], col_name, col_type])
for primary_key in primary_keys:
index, column = col_names[primary_key]
p_keys.append([row['db_id'], tables[index], column])
for foreign_key in foreign_keys:
first, second = foreign_key
first_index, first_column = col_names[first]
second_index, second_column = col_names[second]
f_keys.append([row['db_id'], tables[first_index], tables[second_index], first_column, second_column])
spider_schema = pd.DataFrame(schema, columns=['Database name', ' Table Name', ' Field Name', ' Type'])
spider_primary = pd.DataFrame(p_keys, columns=['Database name', 'Table Name', 'Primary Key'])
spider_foreign = pd.DataFrame(f_keys,
columns=['Database name', 'First Table Name', 'Second Table Name', 'First Table Foreign Key',
'Second Table Foreign Key'])
return spider_schema,spider_primary,spider_foreign
def SQL_checker(sql, database):
# sql be like: "SELECT * FROM car_1 WHERE car_1.id = 1"
# database is the path to local xxx.sqlite
# the function of this part is to check if the sql is valid, if not, return the error message
path = DATABASE_PATH + '/' + database + '/' + database + '.sqlite'
try:
# Connect to the SQLite database
conn = sqlite3.connect(path)
# Create a cursor object to execute the SQL query
cursor = conn.cursor()
# Execute the SQL query
cursor.execute(sql)
# Commit the transaction and close the connection
conn.commit()
conn.close()
# Return a success message if the SQL query is valid
prompt = "The SQL query is valid in grammar."
checker = False
except sqlite3.Error as e:
# Return the error message if the SQL query is not valid
instruction = f"""#### The SQL you generated: {sql}, raised the error: {e}. Please fix the error and generate again. \n"""
fields = find_fields_MYSQL_like(database)
fields += "Foreign_keys = " + find_foreign_keys_MYSQL_like(database) + '\n'
fields += "Primary_keys = " + find_primary_keys_MYSQL_like(database)
prompt = instruction + fields + checker_prompt
checker = True
return prompt, checker
import time
def GPT4_generation(prompt):
limit_marker = False
fake_SQL = "SELECT COUNT(*) FROM singer"
while True:
try:
response = openai.ChatCompletion.create(
model=model_name,
messages=[{"role": "user", "content": prompt}],
n = 1,
stream = False,
temperature=0.0,
max_tokens=600,
top_p = 1.0,
frequency_penalty=0.0,
presence_penalty=0.0,
)
return response['choices'][0]['message']['content'], limit_marker
except openai.error.RateLimitError as e:
print(f"RateLimitError: {e}")
print("Sleeping for 20 seconds...")
time.sleep(20)
print("Retrying...")
except Exception as e:
print(f"Unexpected error: {e}")
return fake_SQL, limit_marker
token = "Wgj-oa5yHxfmjo0lLybtWGLiWYoKTZ07NXcUiaPiUHmtQQiAKlfzNTOA9lwqmCz2N0qGFg."
chatbot = Chatbot(token)
def Bard_generation(prompt):
a = chatbot.ask(prompt)
answer = a['content']
print(answer)
return answer
def save_breaker(breaker):
with open("breaker.txt", "w") as f:
f.write(str(breaker))
# Function to load the breaker value from a file
def load_breaker():
if os.path.exists("breaker.txt"):
with open("breaker.txt", "r") as f:
breaker = int(f.read())
if breaker > 1037:
breaker = 0
else:
breaker = breaker
return breaker
return 0
if __name__ == '__main__':
###########################################################################################
# load the data
spider_schema,spider_primary,spider_foreign = creatiing_schema(DATASET_SCHEMA)
val_df = load_data(DATASET)
Log_content = []
for index,sample in val_df.iterrows():
print('index:',index)
db_id = sample['database_id'] # e.g.'car_1'
question_final = sample['final']['utterance'] # e.g.'How many car models are produced by each maker? List the count and the maker full name.'
query_final = sample['final']['query'] # e.g.'SELECT COUNT(*) FROM car_1 WHERE car_1.id = 1'
schema = find_fields_MYSQL_like(db_id) + '\n' + "foreign key:" + find_foreign_keys_MYSQL_like(
db_id) + '\n' + "primary key:" + find_primary_keys_MYSQL_like(db_id) #
'''
schema: Table car_makers, columns = [*,Id,Maker,FullName,Country]
Table car_names, columns = [*,MakeId,Model,Make]
Table cars_data, columns = [*,Id,MPG,Cylinders,Edispl,Horsepower,Weight,Accelerate,Year]
Table continents, columns = [*,ContId,Continent]
Table countries, columns = [*,CountryId,CountryName,Continent]
Table model_list, columns = [*,ModelId,Maker,Model]
foreign key:[countries.Continent = continents.ContId,car_makers.Country = countries.CountryId,model_list.Maker = car_makers.Id,car_names.Model = model_list.Model,cars_data.Id = car_names.MakeId]
primary key:[continents.ContId,countries.CountryId,car_makers.Id,model_list.ModelId,car_names.MakeId,cars_data.Id]
'''
# for first round:
# input: question+db_id+schema+three_sqls
# output: sql
# for other rounds and final round:
# input: question + message + generated_sql
# output: sql
message = ''
old_message = ''
history = {}
tmp = {}
SQLs_temp_pred = []
SQLs_temp_gold = []
tmp['question'] = question_final
for round, dialog in enumerate(sample['interaction']): # assume the goal is to output the final sql by using the final question and dialog information
print(f'The {round} round of dialog in sample {index}:') # each sample has at least 1 previous conversation
question_round = dialog['utterance']
query_round = dialog['query']
if round == 0:
old_message = message + \
SQL_generation_prompt + \
"\ndatabase:" + db_id + \
"\ndatabase chema:" + schema + \
"\nSome samples to text2sql:" + one_shot_Cosql_prompt_without_explain
message = message + \
SQL_generation_prompt + \
"\ndatabase:" + db_id + \
"\ndatabase chema:" + schema + \
"\nSome samples to text2sql:" + one_shot_Cosql_prompt_without_explain+ \
"\nQuestion:" + question_round + \
"\nOutput:"
else:
message = old_message + \
Contextual_prompt + \
"\nThis is previous question:" + history['question'] + \
"\nThis is your previous generated SQl:" + history['query']+ \
"\nQuestion:" + question_round + \
"\nOutput:"
old_message = old_message + \
"\nThis is previous question:" + history['question'] + \
"\nThis is your previous generated SQl:" + history['query']
print('message:',message)
SQL= Bard_generation(message)
SQL = SQL.replace('\n',' ')
print('\nBard generated SQL:',SQL+'\n')
history['question'] = question_round
history['query'] = SQL
'''
save the log and generated sql, gold sql in some file: may need to use some process as the response is like:
SELECT car_names.Model, COUNT(cars_data.Id) AS popularity
FROM car_names
JOIN cars_data ON cars_data.Id = car_names.MakeId
GROUP BY car_names.Model
ORDER BY popularity DESC;
There are '\n' in line, and I don't want it
'''
SQLs_temp_pred.append(SQL)
SQLs_temp_gold.append(query_round+'\t'+db_id)
# this loop will focus on the final round, which is the 'final' in dataset
with open ('./predicted_sql.txt','a') as f:
for line in SQLs_temp_pred:
f.write(line+'\n')
with open ('./gold_sql.txt','a') as f:
for line in SQLs_temp_gold:
f.write(line+'\n')
# CUDA_VISIBLE_DEVICES=7 python read_cosql.py | [
"\nHere is a sample of multi-turn text2sql for you to understand the task.\nTable advisor, columns = [*,s_ID,i_ID]\nTable classroom, columns = [*,building,room_number,capacity]\nTable course, columns = [*,course_id,title,dept_name,credits]\nTable department, columns = [*,dept_name,building,budget]\nTable instructor, columns = [*,ID,name,dept_name,salary]\nTable prereq, columns = [*,course_id,prereq_id]\nTable section, columns = [*,course_id,sec_id,semester,year,building,room_number,time_slot_id]\nTable student, columns = [*,ID,name,dept_name,tot_cred]\nTable takes, columns = [*,ID,course_id,sec_id,semester,year,grade]\nTable teaches, columns = [*,ID,course_id,sec_id,semester,year]\nTable time_slot, columns = [*,time_slot_id,day,start_hr,start_min,end_hr,end_min]\n\nforeign key:[course.dept_name = department.dept_name,instructor.dept_name = department.dept_name,section.building = classroom.building,section.room_number = classroom.room_number,section.course_id = course.course_id,teaches.ID = instructor.ID,teaches.course_id = section.course_id,teaches.sec_id = section.sec_id,teaches.semester = section.semester,teaches.year = section.year,student.dept_name = department.dept_name,takes.ID = student.ID,takes.course_id = section.course_id,takes.sec_id = section.sec_id,takes.semester = section.semester,takes.year = section.year,advisor.s_ID = student.ID,advisor.i_ID = instructor.ID,prereq.prereq_id = course.course_id,prereq.course_id = course.course_id]\nprimary key:[classroom.building,department.dept_name,course.course_id,instructor.ID,section.course_id,teaches.ID,student.ID,takes.ID,advisor.s_ID,time_slot.time_slot_id,prereq.course_id]\n\nIteration 1:\nQuestion: Find out the average salary of professors?\nSELECT avg ( salary ) FROM instructor\n\nIteration 2: # iteration 2 will see the question and sql in iteration 1\nQuestion: Find the average salary of the professors of each department?\nSELECT avg ( salary ) , dept_name FROM instructor GROUP BY dept_name\n\nIteration 3: # iteration 3 will see the questiones and sqls in iteration 2 and 1\nQuestion: Which department has the highest average salary of professors?\nSELECT dept_name FROM instructor GROUP BY dept_name ORDER BY avg ( salary ) DESC LIMIT 1\n\nQuesion: show the train name and station name for each train.\n\n\n",
"PLACEHOLDERPLACEHOLDERPLACEHOLDER",
"\nPlease help me generate the corresponding SQL query with no further explaination.\n",
"\nSorry, I won't give you any examples. Please generate based on your own semantic parsing ability.\n",
"The SQL query is valid in grammar.",
"\nYou are an expert in SQL. I will give you a natural language question and a database schema, \nplease help me generate the corresponding SQL query with no further explaination.\n",
"\nHere is some examples of EASY, MEDIUM and HARD SQL queries.\nSELECT count(*) FROM singer \nSELECT avg(weight) , pettype FROM pets GROUP BY pettype\nSELECT T1.fname , T1.age FROM student AS T1 JOIN has_pet AS T2 ON T1.stuid = T2.stuid JOIN pets AS T3 ON T3.petid = T2.petid WHERE T3.pettype = 'dog' AND T1.stuid NOT IN (SELECT T1.stuid FROM student AS T1 JOIN has_pet AS T2 ON T1.stuid = T2.stuid JOIN pets AS T3 ON T3.petid = T2.petid WHERE T3.pettype = 'cat')\n",
"\nNow I will give you some context (question and your own answer). Please generate the corresponding SQL query with no further explaination.\n"
] |
2024-01-10 | YanJiaHuan/Text2Sql | multi_turn~Bard_GPT~V0_fewshot_other~V0_fewshot_other.py | import pandas as pd
import time
import re
import openai
import os
from os import environ
import sys
import tiktoken
import sqlite3
from Bard import Chatbot
'''
This is a few-shot prompting trial for GPT-3.5, which works as a baseline for our experiments.
Version 0_1:
1. GPT will be given a database schema and a natural language question, and it will generate the corresponding SQL query.
2. We will give GPT a few-shot prompt, which consists of a few SQL queries with the same database schema.
3. We will try 1-shot, 3-shot, and 5-shot (sampled from other datasets, excluding Spider) and see how GPT performs.
A minimal sketch of how such a k-shot prompt could be assembled follows this docstring.
'''
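# A minimal sketch (hypothetical helper, not used elsewhere in this script) of how a
# k-shot prompt could be assembled: k sampled (question, SQL) pairs from other datasets
# are prepended to the target database schema and question before the prompt is sent to GPT.
def build_few_shot_prompt(examples, schema, question):
    # examples: list of (question, sql) tuples sampled from non-Spider datasets
    shots = "\n\n".join("Question: {}\nSQL: {}".format(q, s) for q, s in examples)
    return shots + "\n\ndatabase schema:" + schema + "\nquestion: " + question + "\nYour SQL:"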
#################### 0. Prompt ####################
SQL_generation_prompt = '''
You are an expert in SQL. I will give you a natural language question and a database schema,
please help me generate the corresponding SQL query with no further explaination.
'''
checker_prompt = '''
Please help me generate the corresponding SQL query with no further explaination.
'''
#################### 1. Set up ####################
#----------------------------------------------------------------------------------------------------------
# API_KEY = "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" # my own key
API_KEY = "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" # purchased key
# API_KEY = "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" # gpt4, 孙哥's key
os.environ["OPENAI_API_KEY"] = API_KEY
openai.api_key = os.getenv("OPENAI_API_KEY")
#changed
task = 'Spider' # 'CoSQL' or 'Spider'
if task == 'CoSQL':
path_to_CoSQL = "./cosql_dataset"
DATASET_SCHEMA = path_to_CoSQL+"/tables.json"
DATASET = path_to_CoSQL+"/sql_state_tracking/cosql_dev.json"
OUTPUT_FILE_1 = "./predicted_sql.txt"
OUTPUT_FILE_2 = "./gold_sql.txt"
DATABASE_PATH = path_to_CoSQL+"/database"
else:
path_to_Spider = "/Users/yan/Desktop/text2sql/spider"
DATASET_SCHEMA = path_to_Spider + "/tables.json"
DATASET = path_to_Spider + "/dev.json"
OUTPUT_FILE_1 = "./Spider/predicted_sql.txt"
OUTPUT_FILE_2 = "./Spider/gold_sql.txt"
DATABASE_PATH = path_to_Spider + "/database"
# set max tokens limit
MAX_TOKENS = 4096
model_name = "gpt-3.5-turbo"
# model_name = "gpt-4"
encoding = tiktoken.encoding_for_model(model_name)
# count the token
def num_tokens_from_string(string: str, model_name: str) -> int:
"""Returns the number of tokens in a text string."""
encoding = tiktoken.encoding_for_model(model_name)
num_tokens = len(encoding.encode(string))
return num_tokens
# load dataset
def load_data(DATASET):
return pd.read_json(DATASET)
def find_foreign_keys_MYSQL_like(db_name):
df = spider_foreign[spider_foreign['Database name'] == db_name]
output = "["
for index, row in df.iterrows():
output += row['First Table Name'] + '.' + row['First Table Foreign Key'] + " = " + row['Second Table Name'] + '.' + row['Second Table Foreign Key'] + ','
output= output[:-1] + "]"
return output
def find_fields_MYSQL_like(db_name):
df = spider_schema[spider_schema['Database name'] == db_name]
df = df.groupby(' Table Name')
output = ""
for name, group in df:
output += "Table " +name+ ', columns = ['
for index, row in group.iterrows():
output += row[" Field Name"]+','
output = output[:-1]
output += "]\n"
return output
def find_primary_keys_MYSQL_like(db_name):
df = spider_primary[spider_primary['Database name'] == db_name]
output = "["
for index, row in df.iterrows():
output += row['Table Name'] + '.' + row['Primary Key'] +','
output = output[:-1]
output += "]\n"
return output
def creatiing_schema(DATASET_JSON):
schema_df = pd.read_json(DATASET_JSON)
schema_df = schema_df.drop(['column_names','table_names'], axis=1)
schema = []
f_keys = []
p_keys = []
for index, row in schema_df.iterrows():
tables = row['table_names_original']
col_names = row['column_names_original']
col_types = row['column_types']
foreign_keys = row['foreign_keys']
primary_keys = row['primary_keys']
for col, col_type in zip(col_names, col_types):
index, col_name = col
if index == -1:
for table in tables:
schema.append([row['db_id'], table, '*', 'text'])
else:
schema.append([row['db_id'], tables[index], col_name, col_type])
for primary_key in primary_keys:
index, column = col_names[primary_key]
p_keys.append([row['db_id'], tables[index], column])
for foreign_key in foreign_keys:
first, second = foreign_key
first_index, first_column = col_names[first]
second_index, second_column = col_names[second]
f_keys.append([row['db_id'], tables[first_index], tables[second_index], first_column, second_column])
spider_schema = pd.DataFrame(schema, columns=['Database name', ' Table Name', ' Field Name', ' Type'])
spider_primary = pd.DataFrame(p_keys, columns=['Database name', 'Table Name', 'Primary Key'])
spider_foreign = pd.DataFrame(f_keys,
columns=['Database name', 'First Table Name', 'Second Table Name', 'First Table Foreign Key',
'Second Table Foreign Key'])
return spider_schema,spider_primary,spider_foreign
def SQL_checker(sql, database):
# sql be like: "SELECT * FROM car_1 WHERE car_1.id = 1"
# database is the path to local xxx.sqlite
# the function of this part is to check if the sql is valid, if not, return the error message
path = DATABASE_PATH + '/' + database + '/' + database + '.sqlite'
try:
# Connect to the SQLite database
conn = sqlite3.connect(path)
# Create a cursor object to execute the SQL query
cursor = conn.cursor()
# Execute the SQL query
cursor.execute(sql)
# Commit the transaction and close the connection
conn.commit()
conn.close()
# Return a success message if the SQL query is valid
prompt = "The SQL query is valid in grammar."
checker = False
except sqlite3.Error as e:
# Return the error message if the SQL query is not valid
instruction = f"""#### the sql generated by you: {sql}, has error like :{e} , please fix the error and generate again. \n"""
fields = find_fields_MYSQL_like(database)
fields += "Foreign_keys = " + find_foreign_keys_MYSQL_like(database) + '\n'
fields += "Primary_keys = " + find_primary_keys_MYSQL_like(database)
prompt = instruction + fields + checker_prompt
checker = True
return prompt, checker
import time
def GPT4_generation(prompt):
'''
openai.error.RateLimitError: Rate limit reached for default-gpt-3.5-turbo
in organization org-GFmlumrCZBB2Y40fVv7f8qgp on requests per min. Limit: 3 / min.
Please try again in 20s. Contact us through our help center at help.openai.com if you continue to have issues.
Please add a payment method to your account to increase your rate limit.
Visit https://platform.openai.com/account/billing to add a payment method.
'''
limit_marker = False
fake_SQL = "SELECT COUNT(*) FROM singer"
while True:
try:
response = openai.ChatCompletion.create(
model=model_name,
messages=[{"role": "user", "content": prompt}],
n = 1,
stream = False,
temperature=0.0,
max_tokens=600,
top_p = 1.0,
frequency_penalty=0.0,
presence_penalty=0.0,
)
return response['choices'][0]['message']['content'], limit_marker
except openai.error.RateLimitError as e:
print(f"RateLimitError: {e}")
print("Sleeping for 20 seconds...")
time.sleep(20)
print("Retrying...")
except Exception as e:
print(f"Unexpected error: {e}")
return fake_SQL, limit_marker
# initialize the chatbot
def extract_sql(response):
matches = re.findall(r'```sql\n(.*?)\n```', response, re.DOTALL)
return matches
tokens=(
"WwiJN0oLURBx7gX_O8WVz9Fufj1iefdzkpt2fsbsb-e8al2Kvufapnj5mYa6vGo5P1ub9w.",
"WwhXnsbFLxozhOKG1-NUO78iif9IiN5El3Qk9yk5fi70TMcaUMOwfWwjTyqAyNe6MCtiEA.",
"Wwi1wxVyz-X2piJk8Ts84d08Fm1UmHDTOS7ftlD6LCXdbUVjFrQlJfl97an8UHhZQM8juQ.",
"Wwj6xMcUvzQUaKwcRQ-qvwrIcZLDBRp9XP25HkEVBAJDVZBzujepzI_dttehdJiCAjCIMg.",
"WwjMZ_TL9xIl4jREPppT5df6tAsjLLgjRo_GKK5iLslGOh5lMtstOMP_iJEADXq6gjFEKA.",
"Wgj-oa5yHxfmjo0lLybtWGLiWYoKTZ07NXcUiaPiUHmtQQiAKlfzNTOA9lwqmCz2N0qGFg."
)
def Bard_generation(prompt):
limit_marker = False
token_index = 0
chatbot = Chatbot(tokens[token_index])
answer = chatbot.ask(prompt)
print('whole answer', answer)
while True: # This loop will continue until a string is returned
if isinstance(answer, dict): # check if answer is a dictionary (error response)
limit_marker = True
print("Token limit reached, switching to a new token...")
token_index += 1 # Move to the next token
if token_index >= len(tokens): # If we've used all tokens, start over
token_index = 0
print("exceeding total limit, Waiting 15 seconds...")
time.sleep(15) # freeze for 15s
chatbot = Chatbot(tokens[token_index]) # Create a new chatbot with the new token
answer = chatbot.ask(prompt) # resend the request
else:
return answer[0][0], limit_marker
def save_breaker(breaker):
with open("breaker.txt", "w") as f:
f.write(str(breaker))
# Function to load the breaker value from a file
def load_breaker():
if os.path.exists("breaker.txt"):
with open("breaker.txt", "r") as f:
breaker = int(f.read())
if breaker > 1037:
breaker = 0
else:
breaker = breaker
return breaker
return 0
if __name__ == '__main__':
###########################################################################################
# load the data
spider_schema,spider_primary,spider_foreign = creatiing_schema(DATASET_SCHEMA)
val_df = load_data(DATASET)
SQLs_temp_pred = []
SQLs_temp_gold = []
for index,sample in val_df.iterrows():
print('index:',index)
db_id = sample['db_id'] # e.g.'car_1'
question = sample['question'] # e.g.'How many car models are produced by each maker? List the count and the maker full name.'
SQL_gold = sample['query'] # e.g.'SELECT COUNT(*) FROM car_1 WHERE car_1.id = 1'
print('SQL_gold:',SQL_gold)
schema = find_fields_MYSQL_like(db_id) + '\n' + "foreign key:" + find_foreign_keys_MYSQL_like(
db_id) + '\n' + "primary key:" + find_primary_keys_MYSQL_like(db_id) #
###############################################
'''message to GPT, to get SQL'''
        message_GPT = SQL_generation_prompt + \
                      "\ndatabase:" + db_id + \
                      "\ndatabase schema:" + schema + \
"Just give me the plain SQL without any placeholders." + \
"\nquestion:" + question+ \
"\nYour SQL:"
print('message to GPT3.5:', message_GPT)
SQL, limit_marker = GPT4_generation(message_GPT)
print('SQL:', SQL)
SQL = SQL.replace('\n', ' ')
print('\nGPT generated SQL:', SQL + '\n')
SQLs_temp_pred.append(SQL)
SQLs_temp_gold.append(SQL_gold+'\t'+db_id)
with open ('./predicted_sql.txt', 'a') as f:
f.write(SQL+'\n')
with open ('./gold_sql.txt', 'a') as f:
f.write(SQL_gold+'\t'+db_id+'\n')
# CUDA_VISIBLE_DEVICES=7 python read_cosql.py | [
"The SQL query is valid in grammar.",
"PLACEHOLDERPLACEHOLDERPLACEHOLDER",
"\nYou are an expert in SQL. I will give you a natural language question and a database schema, \nplease help me generate the corresponding SQL query with no further explaination.\n",
"\nPlease help me generate the corresponding SQL query with no further explaination.\n"
] |
2024-01-10 | greenTea31/your-butler | flask_server~modules~agents~finanical_information.py | import os
from langchain import LLMMathChain, LLMChain
from langchain.chat_models import ChatOpenAI
from langchain.embeddings import OpenAIEmbeddings
from langchain.prompts import SystemMessagePromptTemplate, HumanMessagePromptTemplate, ChatPromptTemplate
from langchain.vectorstores.faiss import FAISS
def get_response_from_query(query: str) -> str:
if query is None:
return ""
print(f"query : {query}")
    # Apply FAISS first (load the prebuilt vector store), then come back here
# docs = vector_db.similarity_search(query, k=k)
embedding = OpenAIEmbeddings(openai_api_key=os.environ.get("OPENAI_API_KEY"))
path = "./DB/vector/korea_bank_700_information/index.faiss"
print(os.getcwd())
if os.path.exists(path):
print(f"The file {path} exists.")
else:
print(f"The file {path} does not exist.")
vector_db = FAISS.load_local("./DB/vector/korea_bank_700_information", embedding)
docs = vector_db.similarity_search(query)
chat = ChatOpenAI(model_name="gpt-3.5-turbo-16k", temperature=0)
template = """
당신은 부동산을 구매하려는 사용자에게 금융, 부동산과 관련된 정보를 제공하는 assistant입니다.
답변의 형식은 아래와 같이 진행합니다.
"유저가 모르는 단어": "이에 대한 설명"
"유저가 모르는 단어2": "이에 대한 설명2"
Document retrieved from your DB : {docs}
Answer the questions referring to the documents which you Retrieved from DB as much as possible.
"""
    # If you feel like you don't have enough information to answer the question, say "제가 알고 있는 정보가 없습니다."
system_message_prompt = SystemMessagePromptTemplate.from_template(template)
human_template = "Answer the following question IN KOREAN: {question}"
human_message_prompt = HumanMessagePromptTemplate.from_template(human_template)
chat_prompt = ChatPromptTemplate.from_messages(
[system_message_prompt, human_message_prompt]
)
chain = LLMChain(llm=chat, prompt=chat_prompt)
response = chain.run(docs=docs, question=query)
print(f"response = {response}")
return response | [
"이에 대한 설명2",
"[PLACEHOLDER, PLACEHOLDER]",
"\n 당신은 부동산을 구매하려는 사용자에게 금융, 부동산과 관련된 정보를 제공하는 assistant입니다.\n 답변의 형식은 아래와 같이 진행합니다.\n\n \"유저가 모르는 단어\": \"이에 대한 설명\"\n \"유저가 모르는 단어2\": \"이에 대한 설명2\"\n \n Document retrieved from your DB : {docs}\n Answer the questions referring to the documents which you Retrieved from DB as much as possible.\n ",
"Answer the following question IN KOREAN: {question}",
"유저가 모르는 단어",
"이에 대한 설명",
"유저가 모르는 단어2"
] |
2024-01-10 | greenTea31/your-butler | flask_server~output_parsers.py | from langchain.output_parsers import PydanticOutputParser
from pydantic import BaseModel, Field
class LoanIntel(BaseModel):
loan_name: str = Field(description="Name of the loan")
loan_interest: float = Field(description="Interest rate of the loan")
message: str = Field(description="answer to the user")
bank_name: str = Field(description="Name of the bank")
def to_dict(self):
return {
"loan_name": self.loan_name,
"loan_interest": self.loan_interest,
"message": self.message,
"bank_name": self.bank_name,
}
loan_intel_parser: PydanticOutputParser = PydanticOutputParser(
pydantic_object=LoanIntel
)
| [] |
2024-01-10 | greenTea31/your-butler | flask_server~modules~agents~intention_decesion.py | from langchain.chains import LLMChain
from langchain.chat_models import ChatOpenAI
from langchain.prompts import PromptTemplate
def decide(chat: str):
llm = ChatOpenAI(model_name="gpt-3.5-turbo-16k", temperature=0)
_DEFAULT_TEMPLATE = """
당신은 사용자의 입력을 읽고 그에 대한 의도를 파악하는 assistant입니다.
사용자의 채팅을 보고 다음과 같은 3가지 채팅으로 분류하여 출력해야 합니다.
1. 사용자가 대출 상품의 정보를 얻기를 원하는 의도의 질문
2. 사용자가 대출 상품 외의, 금융 용어에 대한 정보를 얻기를 원하는 의도의 질문
3. 그 외의 질문
대답의 형태는 반드시 아래와 같아야 합니다. 큰 따옴표에 유의하십시오. 반드시 JSON으로 Parsing이 가능한 형태여야 합니다.
{{
"loan": 대출 상품의 정보를 얻기를 원하는 의도의 질문이 들어갑니다,
"finance": 금융 용어에 대한 정보를 얻기를 원하는 의도의 질문이 들어갑니다,
"other": 그 외의 질문이 들어갑니다.
}}
사용자의 채팅 정보는 아래와 같습니다.
{chat}
"""
prompt_template = PromptTemplate(
input_variables=["chat"], template=_DEFAULT_TEMPLATE
)
chain = LLMChain(llm=llm, prompt=prompt_template)
response = chain.run(chat)
print(f"decide response = {response}")
return response
| [
"\n 당신은 사용자의 입력을 읽고 그에 대한 의도를 파악하는 assistant입니다.\n 사용자의 채팅을 보고 다음과 같은 3가지 채팅으로 분류하여 출력해야 합니다.\n \n 1. 사용자가 대출 상품의 정보를 얻기를 원하는 의도의 질문\n \n 2. 사용자가 대출 상품 외의, 금융 용어에 대한 정보를 얻기를 원하는 의도의 질문\n \n 3. 그 외의 질문\n \n 대답의 형태는 반드시 아래와 같아야 합니다. 큰 따옴표에 유의하십시오. 반드시 JSON으로 Parsing이 가능한 형태여야 합니다.\n \n {{\n \"loan\": 대출 상품의 정보를 얻기를 원하는 의도의 질문이 들어갑니다,\n \"finance\": 금융 용어에 대한 정보를 얻기를 원하는 의도의 질문이 들어갑니다,\n \"other\": 그 외의 질문이 들어갑니다.\n }}\n \n 사용자의 채팅 정보는 아래와 같습니다.\n \n {chat}\n ",
"loan",
"finance"
] |
2024-01-10 | greenTea31/your-butler | flask_server~modules~agents~database_query.py | import pymysql
import os
from langchain.chat_models import ChatOpenAI
from langchain.utilities import SQLDatabase
from langchain_experimental.sql import SQLDatabaseChain
from langchain.prompts.prompt import PromptTemplate
def query_loan_chain(chat: str) -> str:
if chat is None:
return ""
# chat = global_chat
print(f"chat : {chat}")
db = pymysql.connect(
host="j9a405.p.ssafy.io",
port=3306,
user="root",
passwd=f"{os.environ.get('MYSQL_PASSWORD')}",
db="loan",
charset="utf8",
autocommit=True,
)
db = SQLDatabase.from_uri(f"mysql+pymysql://root:{os.environ.get('MYSQL_PASSWORD')}@j9a405.p.ssafy.io:3306/loan",
include_tables=["mortgage_loan", "jeonse_loan", "credit_loan"],
sample_rows_in_table_info=5)
llm = ChatOpenAI(model_name="gpt-3.5-turbo-16k", temperature=0)
_DEFAULT_TEMPLATE = """Given an input question, first create a syntactically correct {dialect} query to run, then look at the results of the query and return the answer.
Use the following format. SQLQuery는 유저에게 최대한 많은 정보를 제공하기 위해, id를 제외한 모든 column을 포함하여 조회하게끔 생성되어야만 합니다.:
Question: "Question here"
SQLQuery: "SQL Query to run (SELECT * FROM table_name WHERE conditions)"
SQLResult: "Result of the SQLQuery"
Answer: Final answer with SQL Result in JSON format as below.
{{
"key1": "value1",
"key2": "value2",
"key3": "value3",
...,
"keyN": "valueN"
}}
Only use the following tables:
{table_info}
If someone asks for the table credit_loan(개인신용대출), 최저금리를 조회하기 위해 사용되는 column 이름을 신용점수에 따라 적절히 선택하여야 한다.
Question: {input}
"""
PROMPT = PromptTemplate(
input_variables=["input", "table_info", "dialect"], template=_DEFAULT_TEMPLATE
)
# db_chain = SQLDatabaseChain.from_llm(
# llm, db, prompt=PROMPT, verbose=True, use_query_checker=True, return_intermediate_steps=True
# )
db_chain = SQLDatabaseChain.from_llm(
llm, db, prompt=PROMPT, verbose=True, use_query_checker=True
)
response = db_chain.run(chat)
# response = db_chain(chat)
# return response["intermediate_steps"]
return response | [
"Given an input question, first create a syntactically correct {dialect} query to run, then look at the results of the query and return the answer.\n Use the following format. SQLQuery는 유저에게 최대한 많은 정보를 제공하기 위해, id를 제외한 모든 column을 포함하여 조회하게끔 생성되어야만 합니다.:\n\n Question: \"Question here\"\n SQLQuery: \"SQL Query to run (SELECT * FROM table_name WHERE conditions)\"\n SQLResult: \"Result of the SQLQuery\"\n Answer: Final answer with SQL Result in JSON format as below.\n {{\n \"key1\": \"value1\",\n \"key2\": \"value2\",\n \"key3\": \"value3\",\n ...,\n \"keyN\": \"valueN\"\n }}\n\n Only use the following tables:\n\n {table_info}\n\n If someone asks for the table credit_loan(개인신용대출), 최저금리를 조회하기 위해 사용되는 column 이름을 신용점수에 따라 적절히 선택하여야 한다.\n\n Question: {input}\n ",
"table_info",
"SQL Query to run (SELECT * FROM table_name WHERE conditions)",
"Result of the SQLQuery",
"input",
"Question here"
] |
2024-01-10 | Rohan5manza/LLM-powered-Applications | app2.py | import streamlit as st
from dotenv import load_dotenv
from PyPDF2 import PdfReader
from langchain.text_splitter import CharacterTextSplitter
from langchain.embeddings import HuggingFaceInstructEmbeddings
from langchain.vectorstores import FAISS
from langchain.memory import ConversationBufferMemory
from langchain.chains import ConversationalRetrievalChain
from langchain.chat_models import ChatOpenAI
from langchain.llms import HuggingFaceHub
def get_pdf_text(pdf_docs):
text = ""
for pdf in pdf_docs:
pdf_reader = PdfReader(pdf)
for page in pdf_reader.pages:
text += page.extract_text()
return text
def get_text_chunks(text):
text_splitter = CharacterTextSplitter(
separator="\n",
chunk_size=1000,
chunk_overlap=200,
length_function=len)
chunks = text_splitter.split_text(text)
return chunks
def get_vectorstore(text_chunks, model_name):
embeddings = HuggingFaceInstructEmbeddings(model_name=model_name)
vectorstore = FAISS.from_texts(texts=text_chunks, embedding=embeddings)
return vectorstore
def get_conversation_chain(vectorstore, model_name):
llm = HuggingFaceHub(repo_id=model_name, model_kwargs={"temperature":0.5, "max_length":512})
    memory = ConversationBufferMemory(memory_key='chat_history', return_messages=True)
conversation_chain = ConversationalRetrievalChain.from_llm(
llm=llm,
retriever=vectorstore.as_retriever(),
memory=memory
)
return conversation_chain
def handle_userinput(user_question):
response = st.session_state.conversation({'question':user_question})
st.session_state.chat_history = response['chat_history']
st.write(response)
def main():
load_dotenv()
st.set_page_config(page_title="Chat with multiple PDFs", page_icon="books:")
if "conversation" not in st.session_state:
st.session_state.conversation = None
if "chat_history" not in st.session_state:
        st.session_state.chat_history = None
    if "vectorstore" not in st.session_state:
        st.session_state.vectorstore = None
st.header("Chat with multiple PDFs:books:")
user_question = st.text_input("Ask a question")
# Dropdown menu for selecting Hugging Face model
model_options = ["google/flan-t5-xxl", "kaist-ai/prometheus-7b-v1.0","lmsys/fastchat-t5-3b-v1.0"]
selected_model = st.sidebar.selectbox("Select Hugging Face Model", model_options)
if user_question:
if st.session_state.conversation is None or st.session_state.conversation.llm.model_id != selected_model:
st.session_state.conversation = None
st.session_state.chat_history = None
st.session_state.vectorstore = None
if st.session_state.vectorstore is None:
st.warning("Please upload PDFs and click 'Process' to initialize the model.")
else:
handle_userinput(user_question)
with st.sidebar:
st.subheader("Your documents")
pdf_docs = st.file_uploader("Upload your pdfs here and click on Process", accept_multiple_files=True)
if st.button("Process"):
with st.spinner("Processing"):
raw_text = get_pdf_text(pdf_docs)
text_chunks = get_text_chunks(raw_text)
st.session_state.vectorstore = get_vectorstore(text_chunks, selected_model)
st.session_state.conversation = get_conversation_chain(st.session_state.vectorstore, selected_model)
if __name__ == '__main__':
main()
| [] |
2024-01-10 | Rohan5manza/LLM-powered-Applications | csv.py | import streamlit as st
from streamlit_chat import message
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.chat_models import ChatOpenAI
from langchain.chains import ConversationalRetrievalChain
from langchain.document_loaders.csv_loader import CSVLoader
from langchain.vectorstores import FAISS
import tempfile
user_api_key = st.sidebar.text_input(
label="#### Your OpenAI API key 👇",
placeholder="Paste your openAI API key, sk-",
type="password")
uploaded_file = st.sidebar.file_uploader("upload", type="csv")
if uploaded_file :
with tempfile.NamedTemporaryFile(delete=False) as tmp_file:
tmp_file.write(uploaded_file.getvalue())
tmp_file_path = tmp_file.name
loader = CSVLoader(file_path=tmp_file_path, encoding="utf-8")
data = loader.load()
embeddings = OpenAIEmbeddings()
vectors = FAISS.from_documents(data, embeddings)
chain = ConversationalRetrievalChain.from_llm(llm = ChatOpenAI(temperature=0.0,model_name='gpt-3.5-turbo', openai_api_key=user_api_key),
retriever=vectors.as_retriever())
def conversational_chat(query):
result = chain({"question": query, "chat_history": st.session_state['history']})
st.session_state['history'].append((query, result["answer"]))
return result["answer"]
if 'history' not in st.session_state:
st.session_state['history'] = []
if 'generated' not in st.session_state:
st.session_state['generated'] = ["Hello ! Ask me anything about " + uploaded_file.name + " 🤗"]
if 'past' not in st.session_state:
st.session_state['past'] = ["Hey ! 👋"]
#container for the chat history
response_container = st.container()
#container for the user's text input
container = st.container()
with container:
with st.form(key='my_form', clear_on_submit=True):
user_input = st.text_input("Query:", placeholder="Talk about your csv data here (:", key='input')
submit_button = st.form_submit_button(label='Send')
if submit_button and user_input:
output = conversational_chat(user_input)
st.session_state['past'].append(user_input)
st.session_state['generated'].append(output)
if st.session_state['generated']:
with response_container:
for i in range(len(st.session_state['generated'])):
message(st.session_state["past"][i], is_user=True, key=str(i) + '_user', avatar_style="big-smile")
message(st.session_state["generated"][i], key=str(i), avatar_style="thumbs")
#streamlit run tuto_chatbot_csv.py | [] |
2024-01-10 | Rohan5manza/LLM-powered-Applications | csvapp.py | import streamlit as st
from dotenv import load_dotenv
from langchain.agents.agent_types import AgentType
#from langchain.chat_models import ChatOpenAI
#from langchain.llms import OpenAI
import tempfile
from langchain.vectorstores import FAISS
from langchain.embeddings import HuggingFaceInstructEmbeddings
from langchain.llms import HuggingFaceHub
from langchain.document_loaders.csv_loader import CSVLoader
from langchain.chains import ConversationalRetrievalChain
from streamlit_chat import message
def main():
load_dotenv()
st.set_page_config(page_title="ask your CSV questions")
st.header("Ask your CSV")
user_csv=st.file_uploader("Upload your csv file",type="csv")
if user_csv :
with tempfile.NamedTemporaryFile(delete=False) as tmp_file:
tmp_file.write(user_csv.getvalue())
tmp_file_path = tmp_file.name
loader = CSVLoader(file_path=tmp_file_path, encoding="utf-8")
data = loader.load()
embeddings=HuggingFaceInstructEmbeddings(model_name="hkunlp/instructor-xl")
vectors = FAISS.from_documents(data, embeddings)
user_question=st.text_input("Ask a question about your CSV:")
#llm=OpenAI(temperature=0.8)
llm = HuggingFaceHub(repo_id="google/flan-t5-xxl", model_kwargs={"temperature":0.5, "max_length":3000})
conversation_chain=ConversationalRetrievalChain.from_llm(
llm=llm,
retriever=vectors.as_retriever())
#agent=create_csv_agent(llm,user_csv,verbose=True,agent_type=AgentType.ZERO_SHOT_REACT_DESCRIPTION)
def conversational_chat(query):
            result = conversation_chain({"question": query, "chat_history": st.session_state['history']})
st.session_state['history'].append((query, result["answer"]))
return result["answer"]
if 'history' not in st.session_state:
st.session_state['history'] = []
if 'generated' not in st.session_state:
            st.session_state['generated'] = ["Hello ! Ask me anything about " + user_csv.name + " 🤗"]
if 'past' not in st.session_state:
st.session_state['past'] = ["Hey ! 👋"]
#container for the chat history
response_container = st.container()
#container for the user's text input
container = st.container()
with container:
with st.form(key='my_form', clear_on_submit=True):
user_input = st.text_input("Query:", placeholder="Talk about your csv data here (:", key='input')
submit_button = st.form_submit_button(label='Send')
if submit_button and user_input:
output = conversational_chat(user_input)
st.session_state['past'].append(user_input)
st.session_state['generated'].append(output)
if st.session_state['generated']:
with response_container:
for i in range(len(st.session_state['generated'])):
message(st.session_state["past"][i], is_user=True, key=str(i) + '_user', avatar_style="big-smile")
message(st.session_state["generated"][i], key=str(i), avatar_style="thumbs")
if __name__=="__main__":
main()
| [] |
2024-01-10 | SOA-team3/transound | python_utils~transcriber~transcribe_openai.py | import os
# https://platform.openai.com/docs/guides/speech-to-text/supported-languages
from openai import OpenAI
import sys
# Get Ruby Input
ruby_input = sys.stdin.read()
audio_file_path = ruby_input.split('\n')[0]
openai_api_key = ruby_input.split('\n')[1]
# Set your OpenAI API key
os.environ["OPENAI_API_KEY"] = openai_api_key
# Create an OpenAI client
client = OpenAI()
# Open the audio file
# audio_file_path = "podcast_mp3_store/Ricky Gervais: A Joke About His Will.mp3"
audio_file = open(audio_file_path, "rb")
# Create a transcription using the Whisper model
transcript = client.audio.transcriptions.create(
model="whisper-1",
file=audio_file,
response_format="text"
)
# Print the generated transcript
print(transcript) | [] |
2024-01-10 | SOA-team3/transound | python_utils~translator~translate_openai.py | import os
import sys
from openai import OpenAI
# Get Ruby Input
ruby_input = sys.stdin.read()
text = ruby_input.split('\n')[0]
openai_api_key = ruby_input.split('\n')[1]
translate_language = ruby_input.split('\n')[2]
# Set your OpenAI API key
os.environ["OPENAI_API_KEY"] = openai_api_key
# Create an OpenAI client
client = OpenAI()
def translate_text(text, target_language):
response = client.chat.completions.create(
model="gpt-4",
messages=[
{
"role": "system",
"content": f"Translate the following text into {target_language}: {text}\n"
},
],
temperature=0.7,
max_tokens=64,
top_p=1
)
return response.choices[0].message.content
# Print the generated translation
translation = translate_text(text, translate_language)
print(translation)
# Old version # this is no longer supported in openai>=1.0.0
# import openai
# openai.api_key = openai_api_key
# prompt = f"Translate the following text into {translate_language}: {text}\n"
# response = openai.Completion.create(
# engine = 'ada',
# prompt = prompt,
# max_tokens = 50,
# n = 1,
# temperature = 0.5
# )
# print(response.choices) | [
"Translate the following text into PLACEHOLDER: PLACEHOLDER\n"
] |
2024-01-10 | devdoshi/ghidra_tools | g3po~g3po.py | # Query OpenAI for a comment
#@author Lucca Fraser
#@category AI
#@keybinding
#@menupath
#@toolbar
import subprocess as sp
import textwrap
import logging
from logging import DEBUG, INFO, WARNING, ERROR, CRITICAL
import json
import os
import re
from ghidra.app.script import GhidraScript
from ghidra.program.model.listing import Function, FunctionManager
from ghidra.program.model.mem import MemoryAccessException
from ghidra.util.exception import DuplicateNameException
from ghidra.program.model.symbol import SourceType
##########################################################################################
# Script Configuration
##########################################################################################
#MODEL = "text-curie-001" # Choose which large language model we query
MODEL = "text-davinci-003" # Choose which large language model we query
TEMPERATURE = 0.19 # Set higher for more adventurous comments, lower for more conservative
TIMEOUT = 600 # How many seconds should we wait for a response from OpenAI?
MAXTOKENS = 512 # The maximum number of tokens to request from OpenAI
C3POSAY = True # True if you want the cute C-3PO ASCII art, False otherwise
#LANGUAGE = "the form of a sonnet" # This can also be used as a style parameter for the comment
LANGUAGE = "English" # This can also be used as a style parameter for the comment
EXTRA = "" # Extra text appended to the prompt.
#EXTRA = "but write everything in the form of a sonnet" # for example
LOGLEVEL = INFO # Adjust for more or less line noise in the console.
COMMENTWIDTH = 80 # How wide the comment, inside the little speech balloon, should be.
C3POASCII = r"""
/~\
|oo )
_\=/_
/ \
//|/.\|\\
|| \_/ ||
|| |\ /| ||
# \_ _/ #
| | |
| | |
[]|[]
| | |
/_]_[_\
"""
##########################################################################################
SCRIPTDIR = os.path.dirname(os.path.realpath(__file__))
ICONPATH = os.path.join(SCRIPTDIR, "c3po.png")
# Now how do I set the icon? I'm not sure.
SOURCE = "OpenAI GPT-3"
TAG = SOURCE + " generated comment, take with a grain of salt:"
FOOTER = "Model: {model}, Temperature: {temperature}".format(model=MODEL, temperature=TEMPERATURE)
logging.getLogger().setLevel(LOGLEVEL)
def flatten_list(l):
return [item for sublist in l for item in sublist]
def wordwrap(s, width=COMMENTWIDTH, pad=True):
"""Wrap a string to a given number of characters, but don't break words."""
# first replace single line breaks with double line breaks
lines = [textwrap.TextWrapper(width=width,
break_long_words=False,
break_on_hyphens=True,
replace_whitespace=False).wrap(" " + L)
for L in s.splitlines()]
# now flatten the lines list
lines = flatten_list(lines)
if pad:
lines = [line.ljust(width) for line in lines]
return "\n".join(lines)
def boxedtext(text, width=COMMENTWIDTH, tag=TAG):
wrapped = wordwrap(text, width, pad=True)
wrapped = "\n".join([tag.ljust(width), " ".ljust(width), wrapped, " ".ljust(width), FOOTER.ljust(width)])
side_bordered = "|" + wrapped.replace("\n", "|\n|") + "|"
top_border = "/" + "-" * (len(side_bordered.split("\n")[0]) - 2) + "\\"
bottom_border = top_border[::-1]
return top_border + "\n" + side_bordered + "\n" + bottom_border
def c3posay(text, width=COMMENTWIDTH, character=C3POASCII, tag=TAG):
box = boxedtext(text, width, tag=tag)
headwidth = len(character.split("\n")[1]) + 2
return box + "\n" + " "*headwidth + "/" + character
def escape_unescaped_single_quotes(s):
return re.sub(r"(?<!\\)'", r"\\'", s)
# Example
# $ curl https://api.openai.com/v1/completions -H "Content-Type: application/json" -H "Authorization: Bearer $OPENAI_API_KEY" -d '{"model": "text-davinci-003", "prompt": "Say this is a test", "temperature": 0, "max_tokens": 7}'
def openai_request_cmd(prompt, temperature=0.19, max_tokens=MAXTOKENS, model=MODEL):
openai_api_key = os.getenv("OPENAI_API_KEY")
if openai_api_key is None:
logging.error("OpenAI API key not found in environment variables!")
return None
    data = {
        "model": model,
"prompt": escape_unescaped_single_quotes(prompt), #prompt.replace("'", "\\'"),
"max_tokens": max_tokens,
"temperature": temperature
}
json_data = json.dumps(data)
url = "https://api.openai.com/v1/completions"
cmd = ["curl",
url,
"-H", "Content-Type: application/json",
"-H", "Authorization: Bearer {openai_api_key}".format(openai_api_key=openai_api_key),
"-d", json_data]
return cmd
def openai_request(prompt, temperature=0.19, max_tokens=MAXTOKENS, model=MODEL):
    cmd = openai_request_cmd(prompt, temperature=temperature, max_tokens=max_tokens, model=model)
cmdstr = " ".join(cmd)
logging.info("Running command: {cmdstr}".format(cmdstr=cmdstr))
res = sp.Popen(cmd, shell=False, stdout=sp.PIPE, stderr=sp.PIPE)
exitcode = res.wait()
out = res.stdout.read()
err = res.stderr.read()
if exitcode != 0:
logging.error("OpenAI request failed with exit code {exitcode}".format(exitcode=exitcode))
logging.error("Error: {err}".format(err=err))
return None
logging.info("OpenAI request succeeded with exit code {exitcode}".format(exitcode=exitcode))
logging.info("Response: {out}".format(out=out))
try:
return json.loads(out)
except Exception as e:
logging.error("Failed to parse JSON response: {e}".format(e=e))
return None
def get_current_function():
listing = currentProgram.getListing()
function = listing.getFunctionContaining(currentAddress)
return function
def decompile_current_function(function=None):
if function is None:
function = get_current_function()
logging.info("Current address is at {currentAddress}".format(currentAddress=currentAddress.__str__()))
logging.info("Decompiling function: {function_name} at {function_entrypoint}".format(function_name=function.getName(), function_entrypoint=function.getEntryPoint().__str__()))
decomp = ghidra.app.decompiler.DecompInterface()
decomp.openProgram(currentProgram)
decomp_res = decomp.decompileFunction(function, TIMEOUT, monitor)
if decomp_res.isTimedOut():
logging.warning("Timed out while attempting to decompile '{function_name}'".format(function_name=function.getName()))
elif not decomp_res.decompileCompleted():
logging.error("Failed to decompile {function_name}".format(function_name=function.getName()))
logging.error(" Error: " + decomp_res.getErrorMessage())
return None
decomp_src = decomp_res.getDecompiledFunction().getC()
return decomp_src
def generate_comment(c_code, temperature=0.19, program_info=None, prompt=None, model=MODEL, max_tokens=MAXTOKENS):
intro = "Below is some C code that Ghidra decompiled from a binary that I'm trying to reverse engineer."
#program_info = get_program_info()
#if program_info:
# intro = intro.replace("a binary", f'a {program_info["language_id"]} binary')
if prompt is None:
prompt = """{intro}
```
{c_code}
```
Please provide a detailed explanation of what this code does, in {style}, that might be useful to a reverse engineer. Explain your reasoning as much as possible. Finally, suggest a suitable name for this function and for each variable bearing a default name, offer a more informative name, if the purpose of that variable is unambiguous. {extra}
""".format(intro=intro, c_code=c_code, style=LANGUAGE, extra=EXTRA)
print("Prompt:\n\n{prompt}".format(prompt=prompt))
    response = openai_request(prompt=prompt, temperature=temperature, max_tokens=max_tokens, model=model)
try:
res = response['choices'][0]['text'].strip()
print(res)
return res
except Exception as e:
logging.error("Failed to get response: {e}".format(e=e))
return None
def add_explanatory_comment_to_current_function(temperature=0.19, model=MODEL, max_tokens=MAXTOKENS):
function = get_current_function()
if function is None:
logging.error("Failed to get current function")
return None
old_comment = function.getComment()
if old_comment is not None:
if SOURCE in old_comment:
function.setComment(None)
else:
logging.info("Function already has a comment")
return None
c_code = decompile_current_function(function)
if c_code is None:
logging.error("Failed to decompile current function")
return
approximate_tokens = len(c_code) // 2
logging.info("Length of decompiled C code: {c_code_len} characters, guessing {approximate_tokens} tokens".format(c_code_len=len(c_code), approximate_tokens=approximate_tokens))
if approximate_tokens < max_tokens and approximate_tokens + max_tokens > 3000:
max_tokens = 4096 - approximate_tokens
comment = generate_comment(c_code, temperature=temperature, model=model, max_tokens=max_tokens)
if comment is None:
logging.error("Failed to generate comment")
return
if C3POSAY:
comment = c3posay(comment)
else:
comment = TAG + "\n" + comment
listing = currentProgram.getListing()
function = listing.getFunctionContaining(currentAddress)
try:
function.setComment(comment)
except DuplicateNameException as e:
logging.error("Failed to set comment: {e}".format(e=e))
return
logging.info("Added comment to function: {function_name}".format(function_name=function.getName()))
return comment
add_explanatory_comment_to_current_function(temperature=0.19, model=MODEL, max_tokens=MAXTOKENS)
| [
"Below is some C code that Ghidra decompiled from a binary that I'm trying to reverse engineer.\n\n```\nPLACEHOLDER\n```\n\nPlease provide a detailed explanation of what this code does, in English, that might be useful to a reverse engineer. Explain your reasoning as much as possible. Finally, suggest a suitable name for this function and for each variable bearing a default name, offer a more informative name, if the purpose of that variable is unambiguous. \n\n"
] |
2024-01-10 | iddy-ani/render_demo | dona_diagnosis.py | from app import app
from dash import dcc
from dash import html
import openai
from dash.dependencies import Input, Output, State
import os
api_key = os.environ.get(r'OPENAI_API_KEY')
# Set up OpenAI API key
openai.api_key = api_key
# Define the layout for the Dona's Diagnosis page
dona_layout = html.Div([
html.H1("Dona's Diagnosis"),
dcc.Input(id='question-input', type='text',
placeholder='Ask Dona a question...'),
html.Button('Submit', id='submit-question'),
html.Div(id='answer-output')
])
@app.callback(Output('answer-output', 'children'),
[Input('submit-question', 'n_clicks')],
[State('question-input', 'value')])
def generate_answer(n_clicks, question):
if not question:
return html.P("What has you here today?")
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[{"role": "user", "content": question}]
)
answer = response['choices'][0]['message']['content']
return html.P(answer)
| [] |
2024-01-10 | microsoft/automated-explanations | notebooks_stories~1_generate~02_process_story.py | import os
import matplotlib.pyplot as plt
import seaborn as sns
from os.path import join
from tqdm import tqdm
import pandas as pd
import sys
from IPython.display import display, HTML
from typing import List
from sasc.modules.emb_diff_module import EmbDiffModule
import numpy as np
import matplotlib
import imodelsx.util
from copy import deepcopy
import re
import sasc.generate_helper
import sasc.viz
import scipy.special
from spacy.tokenizer import Tokenizer
from spacy.lang.en import English
from sasc.evaluate import D5_Validator
import openai
from sasc.modules.fmri_module import fMRIModule
from pprint import pprint
import joblib
from sasc.config import RESULTS_DIR
import torch.cuda
import scipy.special
def explanation_story_match(EXPT_DIR, expls, paragraphs, prompts):
if os.path.exists(join(EXPT_DIR, "story_data_match.pdf")):
return
val = D5_Validator()
# visualize single story
scores_data_story = sasc.viz.get_story_scores(val, expls, paragraphs)
joblib.dump(scores_data_story, join(EXPT_DIR, "scores_data_story.pkl"))
s_data = sasc.generate_helper.viz_paragraphs(
paragraphs,
scores_data_story,
expls,
prompts,
normalize_to_range=True,
moving_average=True,
shift_to_range=True,
)
with open(join(EXPT_DIR, "story.html"), "w") as f:
f.write(s_data.encode("ascii", errors="ignore").decode())
# compute scores heatmap
# print('expls', expls, 'paragraphs', paragraphs)
scores_mean, scores_all = sasc.generate_helper.compute_expl_data_match_heatmap(
val, expls, paragraphs
)
joblib.dump(
{"scores_mean": scores_mean, "scores_all": scores_all},
join(EXPT_DIR, "scores_data.pkl"),
)
sasc.viz.heatmap(scores_mean.T, expls, ylab="Story", xlab="Explanation")
# plt.savefig(join(EXPT_DIR, "story_data_match.png"), dpi=300)
plt.savefig(join(EXPT_DIR, "story_data_match.pdf"), bbox_inches="tight")
def module_story_match(EXPT_DIR, expls, paragraphs, voxel_nums, subject):
if os.path.exists(join(EXPT_DIR, f"scores_mod_ngram_length={0}.pkl")):
return
# compute with paragraphs overlapping into each other
# sasc.generate_helper.compute_expl_module_match_heatmap
(
scores_mod,
_,
all_scores,
) = sasc.generate_helper.compute_expl_module_match_heatmap_cached_single_subject(
expls, paragraphs, voxel_nums, subject
)
joblib.dump(
{
"scores_mean": scores_mod,
"scores_all": all_scores,
},
join(EXPT_DIR, f"scores_mod_ngram_length={0}.pkl"),
)
# make plot
s = scores_mod.T
s = scipy.special.softmax(s, axis=0)
sasc.viz.heatmap(s, expls, ylab="Story", xlab="Module")
diag_diff = (
np.mean(np.diag(s))
- (
np.mean(s[np.triu_indices_from(s, k=1)])
+ np.mean(s[np.tril_indices_from(s, k=-1)])
)
/ 2
).round(5)
plt.title(os.path.basename(EXPT_DIR) + " diag_diff=" + str(diag_diff))
plt.savefig(join(EXPT_DIR, f"story_module_match.pdf"), bbox_inches="tight")
# with overlaps
# ngram_lengths = [10, 50, 100, 384]
# for i, ngram_length in enumerate(ngram_lengths):
# print(i, '/', len(ngram_lengths), 'ngram length', ngram_length)
# scores_mod, scores_max_mod, all_scores, all_ngrams = \
# notebook_helper.compute_expl_module_match_heatmap_running(
# expls, paragraphs, voxel_nums, subjects,
# ngram_length=ngram_length, paragraph_start_offset_max=50,
# )
# joblib.dump({
# 'scores_mean': scores_mod,
# 'scores_all': all_scores,
# }, join(EXPT_DIR, f'scores_mod_ngram_length={ngram_length}.pkl'))
def sweep_default_and_polysemantic(subjects=["UTS01", "UTS03"], setting="default"):
EXPT_PARENT_DIR = join(RESULTS_DIR, "stories", setting)
EXPT_NAMES = sorted(os.listdir(EXPT_PARENT_DIR))[::-1]
# filter EXPT_NAMES that don't contain any of the subjects
EXPT_NAMES = [
expt_name
for expt_name in EXPT_NAMES
if any([subject.lower() in expt_name for subject in subjects])
]
for EXPT_NAME in EXPT_NAMES:
EXPT_DIR = join(EXPT_PARENT_DIR, EXPT_NAME)
try:
rows = joblib.load(join(EXPT_DIR, "rows.pkl"))
prompts_paragraphs = joblib.load(
join(EXPT_DIR, "prompts_paragraphs.pkl"),
)
prompts = prompts_paragraphs["prompts"]
paragraphs = prompts_paragraphs["paragraphs"]
except:
# old version
rows = pd.read_csv(join(EXPT_DIR, "rows.csv"))
prompts = open(join(EXPT_DIR, "prompts.txt")).read().split('\n\n')
paragraphs = open(join(EXPT_DIR, "story.txt")).read().split('\n\n')
expls = rows.expl.values
voxel_nums = rows.module_num.values
subjects = rows.subject.values
# run things
print("Computing expl<>story match", EXPT_NAME)
explanation_story_match(EXPT_DIR, expls, paragraphs, prompts)
torch.cuda.empty_cache()
print("Computing module<>story match", EXPT_NAME)
module_story_match(EXPT_DIR, expls, paragraphs,
voxel_nums, subjects[0])
torch.cuda.empty_cache()
def sweep_interactions(subjects=["UTS01", "UTS03"]):
# iterate over seeds
seeds = range(1, 8)
setting = "interactions"
for subject in subjects: # ["UTS01", "UTS03"]:
for seed in seeds:
STORIES_DIR = join(RESULTS_DIR, "stories")
EXPT_NAME = f"{subject.lower()}___jun14___seed={seed}"
EXPT_DIR = join(STORIES_DIR, setting, EXPT_NAME)
# rows = joblib.load(join(EXPT_DIR, "rows1.pkl"))
# expls = rows.expl.values
prompts_paragraphs = joblib.load(
join(EXPT_DIR, "prompts_paragraphs.pkl"),
)
rows1_rep = joblib.load(join(EXPT_DIR, "rows.pkl"))
prompts = prompts_paragraphs["prompts"]
paragraphs = prompts_paragraphs["paragraphs"]
voxel_nums = rows1_rep.module_num.values
subjects = rows1_rep.subject.values
expls = rows1_rep.expl.values
print(
f"Loaded {len(prompts)} prompts, {len(paragraphs)} paragraphs, {len(rows1_rep)} (repeated1) rows"
)
# run things
print("Computing expl<>story match", EXPT_NAME)
explanation_story_match(EXPT_DIR, expls, paragraphs, prompts)
torch.cuda.empty_cache()
print("Computing module<>story match", EXPT_NAME)
module_story_match(EXPT_DIR, expls, paragraphs,
voxel_nums, subjects[0])
torch.cuda.empty_cache()
if __name__ == "__main__":
# sweep_interactions(subjects=["UTS01", "UTS03"])
# sweep_default_and_polysemantic(subjects=['UTS01', 'UTS03'], setting="polysemantic")
# sweep_default_and_polysemantic(subjects=['UTS01', 'UTS03'], setting="default")
sweep_default_and_polysemantic(subjects=['UTS02'], setting="default")
| [
"\n\n",
"prompts_paragraphs.pkl",
"prompts.txt"
] |
2024-01-10 | gbasin/LLM-DB | src~scripts~calc_cosine_sim.py | import sys
import openai
import os
from tenacity import retry, stop_after_attempt, wait_random_exponential
from openai.datalib.numpy_helper import numpy as np
from typing import List, Optional
from dotenv import load_dotenv
#@retry(wait=wait_random_exponential(min=1, max=20), stop=stop_after_attempt(6))
def get_embedding(text: str, engine="text-similarity-davinci-001", **kwargs) -> List[float]:
# replace newlines, which can negatively affect performance.
text = text.replace("\n", " ")
return openai.Embedding.create(input=[text], engine=engine, **kwargs)["data"][0]["embedding"]
def cosine_similarity(a, b):
return np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b))
def main():
# Load environment variables from .env file
load_dotenv()
openai.api_key = os.environ.get('OPENAI_API_KEY')
# process command
try:
# Get the first command-line argument
arg = sys.argv[1]
# Split the argument into a list of strings
str_list = arg.split('|')
embeddings = [get_embedding(str, engine='text-embedding-ada-002') for str in str_list]
# calc embeddings
for i, embedding in enumerate(embeddings):
print(cosine_similarity(embeddings[0], embedding))
except Exception as e:
print(f"An error occurred: {e}")
# cache.flush()
if __name__ == "__main__":
main() | [] |
2024-01-10 | shaheen-syed/LDA | evaluation.py | # -*- coding: utf-8 -*-
"""
Created by: Shaheen Syed
Date: August 2018
The evaluation phase includes a careful analysis and inspection of the latent variables from the various created LDA models. Since LDA is an unsupervised machine learning technique,
extra care should be given during this post-analysis phase; in contrast to, for example, supervised methods where typically a labeled gold-standard dataset exist.
Measures such as predictive likelihood on held-out data have been proposed to evaluate the quality of generated topics. However, such measures correlate negatively with human
interpretability, making topics with high predictive likelihood less coherent from a human perspective. High-quality or coherent latent topics are of particular importance when
they are used to browse document collections or understand the trends and development within a particular research field. As a result, researchers have proposed topic coherence measures,
which are a quantitative approach to automatically uncover the coherence of topics. Topics are considered to be coherent if all or most of the words (e.g., a topic's top-N words) are
related. Topic coherence measures aim to find measures that correlate highly with human topic evaluation, such as topic ranking data obtained by, for example, word and topic intrusion
tests. Human topic ranking data are often considered the gold standard and, consequently, a measure that correlates well is a good indicator for topic interpretability.
Exploring the topics by a human evaluator is considered the best approach. However, since this involves inspecting all the different models, this approach might not be feasible.
Topic coherence measures can quantitatively calculate a proxy for topic quality, and per our analysis, topics with high coherence were considered interpretable by domain experts.
Combing coherence measures with a manual inspection is thus a good approach to find the LDA model that result in meaningful and interpretable topics. In short, three questions
should be answered satisfactory:
- Are topics meaningful, interpretable, coherent and useful?
- Are topics within documents meaningful, appropriate and useful?
- Do the topics facilitate a better understanding of the underlying corpus?
The evaluation phase can also result in topics that are very similar (i.e., identical topics), topics that should ideally be merged or split (i.e., chained or mixed topics), topics
that are un-interpretable (i.e. nonsensical), or topics that contain unimportant, too specific, or too general words. In those cases, it would be wise to revisit the pre-processing
phase and repeat the analysis.
For reference articles see:
Syed, S., Borit, M., & Spruit, M. (2018). Narrow lenses for capturing the complexity of fisheries: A topic analysis of fisheries science from 1990 to 2016. Fish and Fisheries, 19(4), 643–661. http://doi.org/10.1111/faf.12280
Syed, S., & Spruit, M. (2017). Full-Text or Abstract? Examining Topic Coherence Scores Using Latent Dirichlet Allocation. In 2017 IEEE International Conference on Data Science and Advanced Analytics (DSAA) (pp. 165–174). Tokyo, Japan: IEEE. http://doi.org/10.1109/DSAA.2017.61
Syed, S., & Spruit, M. (2018a). Exploring Symmetrical and Asymmetrical Dirichlet Priors for Latent Dirichlet Allocation. International Journal of Semantic Computing, 12(3), 399–423. http://doi.org/10.1142/S1793351X18400184
Syed, S., & Spruit, M. (2018b). Selecting Priors for Latent Dirichlet Allocation. In 2018 IEEE 12th International Conference on Semantic Computing (ICSC) (pp. 194–202). Laguna Hills, CA, USA: IEEE. http://doi.org/10.1109/ICSC.2018.00035
Syed, S., & Weber, C. T. (2018). Using Machine Learning to Uncover Latent Research Topics in Fishery Models. Reviews in Fisheries Science & Aquaculture, 26(3), 319–336. http://doi.org/10.1080/23308249.2017.1416331
"""
# packages and modules
import logging, sys, re
from gensim import corpora, models
from gensim.models.coherencemodel import CoherenceModel
import matplotlib
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
from database import MongoDatabase
from helper_functions import *
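# A minimal, self-contained sketch of the C_V coherence computation described in the module
# docstring above: given a trained LDA model, the tokenized texts and the gensim dictionary,
# CoherenceModel returns a single score (higher generally indicates more interpretable topics).
# This hypothetical helper is illustrative only and is not used by the Evaluation class below.
def cv_coherence_example(lda_model, texts, dictionary):
    cm = CoherenceModel(model=lda_model, texts=texts, dictionary=dictionary, coherence='c_v')
    return cm.get_coherence()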
class Evaluation():
def __init__(self):
logging.info('Initialized {}'.format(self.__class__.__name__))
# instantiate database
self.db = MongoDatabase()
def calculate_coherence(self, file_folder = os.path.join('files', 'lda'), models_folder = os.path.join('files', 'models')):
"""
Calculate the CV coherence score for each of the created LDA models
Parameters
----------
file_folder: os.path
location of the dictionary and corpus for gensim
models_folder: os.path
location where the lda model is saved
"""
logging.info('Start {}'.format(sys._getframe().f_code.co_name))
# read dictionary and corpus
dictionary, corpus = get_dic_corpus(file_folder)
# load bag of words features of each document from the database
texts = [x['tokens'] for x in self.db.read_collection('publications_raw')]
# get path location for models
M = [x for x in read_directory(models_folder) if x.endswith('lda.model')]
# read processed models from database
processed_models = ['{}-{}-{}-{}-{}'.format(x['k'], x['dir_prior'], x['random_state'], x['num_pass'], x['iteration']) for x in self.db.read_collection('coherence')]
# calculate coherence score for each model
for i, m in enumerate(M):
logging.info('Calculating coherence score: {}/{}'.format(i+1, len(M)))
            print(m)
# number of topics
k = m.split(os.sep)[2]
# different dirichlet priors
dir_prior = m.split(os.sep)[3]
# random initiatilizations
random_state = m.split(os.sep)[4]
# passes over the corpus
num_pass = m.split(os.sep)[5]
# max iteration for convergence
iteration = m.split(os.sep)[6]
logging.info('k: {}, dir_prior: {}, random_state: {}, num_pass: {}, iteration: {}'.format(k, dir_prior, random_state, num_pass, iteration))
# check if coherence score already obtained
if '{}-{}-{}-{}-{}'.format(k, dir_prior, random_state, num_pass, iteration) not in processed_models:
# load LDA model
model = models.LdaModel.load(m)
# get coherence c_v score
coherence_c_v = CoherenceModel(model = model, texts = texts, dictionary = dictionary, coherence='c_v')
# get coherence score
score = coherence_c_v.get_coherence()
# logging output
logging.info('coherence score: {}'.format(score))
# save score to database
doc = { 'k' : k, 'dir_prior' : dir_prior, 'random_state' : random_state, 'num_pass' : num_pass, 'iteration' : iteration, 'coherence_score' : score}
self.db.insert_one_to_collection('coherence', doc)
else:
logging.info('coherence score already calculated, skipping ...')
continue
def plot_coherence(self, min_k = 2, max_k = 20, save_location = os.path.join('files', 'plots'), plot_save_name = 'coherence_scores_heatmap.pdf'):
"""
Read coherence scores from database and create heatmap to plot scores
Parameters
-----------
min_k: int
            lowest number of topics created when creating LDA models. Here 2
max_k: int
highest number of topics created when creating LDA models. Here 20
save_location: os.path
location where to save the plot
plot_save_name: string
name for the plot
"""
logging.info('Start {}'.format(sys._getframe().f_code.co_name))
# make sure plot save location exists
create_directory(save_location)
# read documents from database that contain coherence scores
D = list(self.db.read_collection(collection = 'coherence'))
# convert data from document into a list
data = [[int(x['k']), x['dir_prior'],x['random_state'], x['num_pass'], x['iteration'], x['coherence_score']] for x in D]
# create empty dataframe where we can store our scores
df = pd.DataFrame()
# loop trough values of k parameter and find relevant scores for each grid search combination
for k in range(min_k, max_k + 1):
# create dataframe to temporarily store values
df_temp = pd.DataFrame(index = [k])
# loop trough the data to obtain only the scores for a specific k value
for row in sorted(data):
if row[0] == k:
df_temp['{}-{}-{}-{}'.format(row[1],row[2],row[3],row[4])] = pd.Series(row[5], index=[k])
# append temporarary dataframe of only 1 k value to the full dataframe
df = df.append(df_temp)
# transpose the dataframe
df = df.transpose()
# plot the heatmap
ax = sns.heatmap(df, cmap = "Blues", annot = True, vmin = 0.500, vmax = 0.530, square = True, annot_kws = {"size": 11},
fmt = '.3f', linewidths = .5, cbar_kws = {'label': 'coherence score'})
# adjust the figure somewhat
ax.xaxis.tick_top()
plt.yticks(rotation=0)
plt.xticks(rotation=0, ha = 'left')
fig = ax.get_figure()
fig.set_size_inches(19, 6)
# save figure
fig.savefig(os.path.join(save_location, plot_save_name), bbox_inches='tight')
def output_lda_topics(self, K = 9, dir_prior = 'auto', random_state = 42, num_pass = 15, iteration = 200, top_n_words = 10, models_folder = os.path.join('files', 'models'),
save_folder = os.path.join('files', 'tables')):
"""
Create table with LDA topic words and probabilities
        Creates a table of topic words and probabilities + topics in a list format
        Values for K, dir_prior, random_state, num_pass and iteration will become visible when plotting the coherence score. Use the model that
achieved the highest coherence score and plug in the correct values. The values will create the correct file location of the LDA model
for example : files/models/2/auto/42/5/200/lda.model
Parameters
-----------
k: int
number of topics that resulted in the best decomposition of the underlying corpora
dir_prior: string
dirichlet priors 'auto', 'symmetric', 'asymmetric'
random_state: int
seed value for random initialization
num_pass: int
number of passes over the full corpus
iteration: int
max iterations for convergence
top_n_words: int
only print out the top N high probability words
models_folder: os.path
location of created LDA models
save_folder: os.path
location to store the tables
"""
logging.info('Start {}'.format(sys._getframe().f_code.co_name))
# load LDA model according to parameters
model = load_lda_model(os.path.join(models_folder, str(K), dir_prior, str(random_state), str(num_pass), str(iteration)))
# define empty lists so we can fill them with words
topic_table, topic_list = [], []
# loop trough all the topics found within K
for k in range(K):
# create topic header, e.g. (1) TOPIC X
topic_table.append(['{}'.format(get_topic_label(k, labels_available = False).upper())])
# add column for word and probability
topic_table.append(["word", "prob."])
list_string = ""
topic_string = ""
topic_string_list = []
# get topic distribution for topic k and return only top-N words
scores = model.print_topic(k, top_n_words).split("+")
# loop through each word and probability
for score in scores:
# extract score and trim spaces
score = score.strip()
# split on *
split_scores = score.split('*')
# get percentage
percentage = split_scores[0]
# get word
word = split_scores[1].strip('"')
# add word and percentage to table
topic_table.append([word.upper(), "" + percentage.replace("0.", ".")])
# add word to list table
list_string += word + ", "
# add empty line for the table
topic_table.append([""])
# add topic words to list
topic_list.append([str(k+1), list_string.rstrip(", ")])
# save to CSV
save_csv(topic_list, 'topic-list', folder = save_folder)
save_csv(topic_table, 'topic-table', folder = save_folder)
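# Minimal usage sketch (illustrative only): the enclosing class and the helpers
# load_lda_model, get_topic_label and save_csv are defined elsewhere in this project,
# so the instance name below is an assumption.
#
#   tm = TopicModeller()  # hypothetical instance of the enclosing class
#   tm.output_lda_topics(K=9, dir_prior='auto', random_state=42, num_pass=15,
#                        iteration=200, top_n_words=10)
#
# This loads the model stored under files/models/9/auto/42/15/200/ and writes the
# 'topic-list' and 'topic-table' tables into files/tables (exact filenames depend on save_csv).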
| [] |
2024-01-10 | brianpetro/gpt_index | gpt_index~indices~tree~leaf_query.py | """Leaf query mechanism."""
from typing import Dict, Optional, cast
from gpt_index.indices.base import BaseGPTIndexQuery
from gpt_index.indices.data_structs import IndexGraph, Node
from gpt_index.indices.utils import (
extract_numbers_given_response,
get_numbered_text_from_nodes,
get_sorted_node_list,
)
from gpt_index.langchain_helpers.chain_wrapper import openai_llm_predict
from gpt_index.prompts.base import Prompt
from gpt_index.prompts.default_prompts import (
DEFAULT_QUERY_PROMPT,
DEFAULT_QUERY_PROMPT_MULTIPLE,
DEFAULT_REFINE_PROMPT,
DEFAULT_TEXT_QA_PROMPT,
)
class GPTTreeIndexLeafQuery(BaseGPTIndexQuery[IndexGraph]):
"""GPT Tree Index leaf query.
This class traverses the index graph and searches for a leaf node that can best
answer the query.
"""
def __init__(
self,
index_struct: IndexGraph,
query_template: Prompt = DEFAULT_QUERY_PROMPT,
query_template_multiple: Prompt = DEFAULT_QUERY_PROMPT_MULTIPLE,
text_qa_template: Prompt = DEFAULT_TEXT_QA_PROMPT,
refine_template: Prompt = DEFAULT_REFINE_PROMPT,
child_branch_factor: int = 1,
) -> None:
"""Initialize params."""
super().__init__(index_struct)
self.query_template = query_template
self.query_template_multiple = query_template_multiple
self.text_qa_template = text_qa_template
self.refine_template = refine_template
self.child_branch_factor = child_branch_factor
def _query_with_selected_node(
self,
selected_node: Node,
query_str: str,
prev_response: Optional[str] = None,
level: int = 0,
verbose: bool = False,
) -> str:
"""Get response for selected node.
If not leaf node, it will recursively call _query on the child nodes.
If prev_response is provided, we will update prev_response with the answer.
"""
if len(selected_node.child_indices) == 0:
cur_response, formatted_answer_prompt = openai_llm_predict(
self.text_qa_template,
context_str=selected_node.text,
query_str=query_str,
)
if verbose:
print(f">[Level {level}] answer prompt: {formatted_answer_prompt}")
print(f">[Level {level}] Current answer response: {cur_response} ")
else:
cur_response = self._query(
{
i: self.index_struct.all_nodes[i]
for i in selected_node.child_indices
},
query_str,
level=level + 1,
verbose=verbose,
)
if prev_response is None:
return cur_response
else:
context_msg = "\n".join([selected_node.text, cur_response])
cur_response, formatted_refine_prompt = openai_llm_predict(
self.refine_template,
query_str=query_str,
existing_answer=prev_response,
context_msg=context_msg,
)
if verbose:
print(f">[Level {level}] Refine prompt: {formatted_refine_prompt}")
print(f">[Level {level}] Current refined response: {cur_response} ")
return cur_response
def _query(
self,
cur_nodes: Dict[int, Node],
query_str: str,
level: int = 0,
verbose: bool = False,
) -> str:
"""Answer a query recursively."""
cur_node_list = get_sorted_node_list(cur_nodes)
if self.child_branch_factor == 1:
response, formatted_query_prompt = openai_llm_predict(
self.query_template,
num_chunks=len(cur_node_list),
query_str=query_str,
context_list=get_numbered_text_from_nodes(cur_node_list),
)
else:
response, formatted_query_prompt = openai_llm_predict(
self.query_template_multiple,
num_chunks=len(cur_node_list),
query_str=query_str,
context_list=get_numbered_text_from_nodes(cur_node_list),
branching_factor=self.child_branch_factor,
)
if verbose:
print(f">[Level {level}] current prompt template: {formatted_query_prompt}")
numbers = extract_numbers_given_response(response, n=self.child_branch_factor)
if numbers is None:
if verbose:
print(
f">[Level {level}] Could not retrieve response - no numbers present"
)
# could not parse a node number; return the raw LLM response as a fallback
return response
result_response = None
for number_str in numbers:
number = int(number_str)
if number > len(cur_node_list):
if verbose:
print(
f">[Level {level}] Invalid response: {response} - "
f"number {number} out of range"
)
return response
# number is 1-indexed, so subtract 1
selected_node = cur_node_list[number - 1]
print(
f">[Level {level}] Selected node: "
f"[{number}]/[{','.join([str(int(n)) for n in numbers])}]"
)
print(
f">[Level {level}] Node "
f"[{number}] Summary text: {' '.join(selected_node.text.splitlines())}"
)
result_response = self._query_with_selected_node(
selected_node,
query_str,
prev_response=result_response,
level=level,
verbose=verbose,
)
# result_response should not be None
return cast(str, result_response)
def query(self, query_str: str, verbose: bool = False) -> str:
"""Answer a query."""
print(f"> Starting query: {query_str}")
return self._query(
self.index_struct.root_nodes, query_str, level=0, verbose=verbose
).strip()
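# Minimal usage sketch (illustrative): building the IndexGraph itself happens elsewhere in
# the library (e.g. via the tree index class), so `index_struct` below is assumed to exist.
#
#   query_obj = GPTTreeIndexLeafQuery(index_struct, child_branch_factor=2)
#   answer = query_obj.query("What did the author work on?", verbose=True)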
| [] |
2024-01-10 | brianpetro/gpt_index | gpt_index~indices~keyword_table~freq_query.py | """Query for GPTKeywordTableIndex."""
from collections import defaultdict
from typing import Dict, Optional
from gpt_index.indices.base import BaseGPTIndexQuery
from gpt_index.indices.data_structs import KeywordTable
from gpt_index.indices.response_utils import give_response, refine_response
from gpt_index.indices.utils import extract_keywords_given_response, truncate_text
from gpt_index.langchain_helpers.chain_wrapper import openai_llm_predict
from gpt_index.prompts.base import Prompt
from gpt_index.prompts.default_prompts import (
DEFAULT_KEYWORD_EXTRACT_TEMPLATE,
DEFAULT_QUERY_KEYWORD_EXTRACT_TEMPLATE,
DEFAULT_REFINE_PROMPT,
DEFAULT_TEXT_QA_PROMPT,
)
DQKET = DEFAULT_QUERY_KEYWORD_EXTRACT_TEMPLATE
class GPTKeywordTableIndexFreqQuery(BaseGPTIndexQuery[KeywordTable]):
"""GPT Keyword Table Index Frequency Query."""
def __init__(
self,
index_struct: KeywordTable,
keyword_extract_template: Prompt = DEFAULT_KEYWORD_EXTRACT_TEMPLATE,
query_keyword_extract_template: Optional[Prompt] = DQKET,
refine_template: Prompt = DEFAULT_REFINE_PROMPT,
text_qa_template: Prompt = DEFAULT_TEXT_QA_PROMPT,
max_keywords_per_query: int = 10,
num_chunks_per_query: int = 10,
) -> None:
"""Initialize params."""
super().__init__(index_struct=index_struct)
self.max_keywords_per_query = max_keywords_per_query
self.num_chunks_per_query = num_chunks_per_query
self.keyword_extract_template = keyword_extract_template
if query_keyword_extract_template is None:
self.query_keyword_extract_template = keyword_extract_template
else:
self.query_keyword_extract_template = query_keyword_extract_template
self.refine_template = refine_template
self.text_qa_template = text_qa_template
def _query_with_chunk(
self,
text_chunk: str,
query_str: str,
result_response: Optional[str] = None,
verbose: bool = False,
) -> str:
"""Query with a keyword."""
if result_response is None:
return give_response(
query_str,
text_chunk,
text_qa_template=self.text_qa_template,
refine_template=self.refine_template,
verbose=verbose,
)
else:
return refine_response(
result_response,
query_str,
text_chunk,
refine_template=self.refine_template,
verbose=verbose,
)
def query(self, query_str: str, verbose: bool = False) -> str:
"""Answer a query."""
print(f"> Starting query: {query_str}")
response, _ = openai_llm_predict(
self.query_keyword_extract_template,
max_keywords=self.max_keywords_per_query,
question=query_str,
)
keywords = extract_keywords_given_response(
response, self.max_keywords_per_query
)
# go through text chunks in order of most matching keywords
chunk_indices_count: Dict[int, int] = defaultdict(int)
keywords = [k for k in keywords if k in self.index_struct.keywords]
print(f"Extracted keywords: {keywords}")
for k in keywords:
for text_chunk_idx in self.index_struct.table[k]:
chunk_indices_count[text_chunk_idx] += 1
sorted_chunk_indices = sorted(
list(chunk_indices_count.keys()),
key=lambda x: chunk_indices_count[x],
reverse=True,
)
sorted_chunk_indices = sorted_chunk_indices[: self.num_chunks_per_query]
result_response = None
for text_chunk_idx in sorted_chunk_indices:
fmt_text_chunk = truncate_text(
self.index_struct.text_chunks[text_chunk_idx], 50
)
print(f"> Querying with idx: {text_chunk_idx}: {fmt_text_chunk}")
result_response = self._query_with_chunk(
self.index_struct.text_chunks[text_chunk_idx],
query_str,
result_response=result_response,
verbose=verbose,
)
return result_response or "Empty response"
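# Minimal usage sketch (illustrative): the KeywordTable struct is built elsewhere in the
# library (e.g. by the keyword table index class), so `keyword_table` below is assumed.
#
#   query_obj = GPTKeywordTableIndexFreqQuery(keyword_table, max_keywords_per_query=5)
#   answer = query_obj.query("How do I start the API service?", verbose=True)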
| [] |
2024-01-10 | brianpetro/gpt_index | gpt_index~langchain_helpers~chain_wrapper.py | """Wrapper functions around an LLM chain."""
from typing import Any, Dict, Optional, Tuple
from langchain import LLMChain, OpenAI
from gpt_index.prompts.base import Prompt
def openai_llm_predict(
prompt: Prompt, llm_args_dict: Optional[Dict] = None, **prompt_args: Any
) -> Tuple[str, str]:
"""Predict using OpenAI LLM with a prompt string.
Also return the formatted prompt.
"""
llm_args_dict = llm_args_dict or {}
llm = OpenAI(temperature=0, **llm_args_dict)
llm_chain = LLMChain(prompt=prompt, llm=llm)
formatted_prompt = prompt.format(**prompt_args)
full_prompt_args = prompt.get_full_format_args(prompt_args)
return llm_chain.predict(**full_prompt_args), formatted_prompt
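# Minimal usage sketch (illustrative; the exact Prompt constructor signature lives in
# gpt_index.prompts.base and is an assumption here):
#
#   qa_prompt = Prompt(input_variables=["question"], template="Q: {question}\nA:")
#   answer, formatted = openai_llm_predict(qa_prompt, question="What is 2 + 2?")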
| [] |
2024-01-10 | Ahelsinger/somepythoncontent | contentcreation006.py |
import openai
import csv
import time
openai.api_key = ""
def generate_rhyming_story(prompt):
completions = openai.Completion.create(
engine="text-davinci-002",
prompt=prompt,
max_tokens=1024,
n=1,
stop=None,
temperature=0.5,
)
message = completions.choices[0].text
return message
# Prompt for the rhyming story
prompt = "Write a rhyming children's story about a magical unicorn. Make it 200 words, and make each sentence rhyme"
# Generate the rhyming story
rhyming_story = generate_rhyming_story(prompt)
# Split the story into stanzas
stanzas = rhyming_story.split("\n")
# Create a list of field names for the CSV file
fieldnames = []
for i in range(1, len(stanzas)+1):
fieldnames.append("Verse " + str(i))
# Create a human readable timestamp
timestamp = time.strftime("%Y-%m-%d_%H-%M-%S", time.gmtime())
# Create a CSV file and write the stanzas as columns
filename = "rhyming_story_" + timestamp + ".csv"
with open(filename, 'w', newline='') as file:
writer = csv.DictWriter(file, fieldnames=fieldnames)
writer.writeheader()
row = {}
for i in range(len(stanzas)):
row[fieldnames[i]] = stanzas[i]
writer.writerow(row)
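# Illustrative result (story text is made up): the CSV contains one "Verse N" column per
# line of the generated story and a single data row, e.g.
#
#   Verse 1,Verse 2,Verse 3
#   A unicorn with a silver horn,She danced from night until the morn,...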
| [
"Write a rhyming children's story about a magical unicorn. Make it 200 words, and make each sentence rhyme"
] |
2024-01-10 | Rohith-2/customLLM | ingest.py.bak | #!/usr/bin/env python3
import os
import glob
from typing import List
from dotenv import load_dotenv
from multiprocessing import Pool
from tqdm import tqdm
from langchain.document_loaders import (
CSVLoader,
EverNoteLoader,
PDFMinerLoader,
TextLoader,
UnstructuredEmailLoader,
UnstructuredEPubLoader,
UnstructuredHTMLLoader,
UnstructuredMarkdownLoader,
UnstructuredODTLoader,
UnstructuredPowerPointLoader,
UnstructuredWordDocumentLoader,
)
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.vectorstores import Chroma
from langchain.embeddings import HuggingFaceEmbeddings
from langchain.docstore.document import Document
from constants import CHROMA_SETTINGS
load_dotenv()
# Load environment variables
persist_directory = os.environ.get('PERSIST_DIRECTORY')
persist_directory = r"C:\Users\gbukspp-sis005\Documents\privateGPT-main\db"
source_directory = os.environ.get('SOURCE_DIRECTORY', 'source_documents')
embeddings_model_name = 'all-MiniLM-L6-v2'
chunk_size = 500
chunk_overlap = 50
# Custom document loaders
class MyElmLoader(UnstructuredEmailLoader):
"""Wrapper to fallback to text/plain when default does not work"""
def load(self) -> List[Document]:
"""Wrapper adding fallback for elm without html"""
try:
try:
doc = UnstructuredEmailLoader.load(self)
except ValueError as e:
if 'text/html content not found in email' in str(e):
# Try plain text
self.unstructured_kwargs["content_source"]="text/plain"
doc = UnstructuredEmailLoader.load(self)
else:
raise
except Exception as e:
# Add file_path to exception message
raise type(e)(f"{self.file_path}: {e}") from e
return doc
# Map file extensions to document loaders and their arguments
LOADER_MAPPING = {
".csv": (CSVLoader, {}),
# ".docx": (Docx2txtLoader, {}),
".doc": (UnstructuredWordDocumentLoader, {}),
".docx": (UnstructuredWordDocumentLoader, {}),
".enex": (EverNoteLoader, {}),
".eml": (MyElmLoader, {}),
".epub": (UnstructuredEPubLoader, {}),
".html": (UnstructuredHTMLLoader, {}),
".md": (UnstructuredMarkdownLoader, {}),
".odt": (UnstructuredODTLoader, {}),
".pdf": (PDFMinerLoader, {}),
".ppt": (UnstructuredPowerPointLoader, {}),
".pptx": (UnstructuredPowerPointLoader, {}),
".txt": (TextLoader, {"encoding": "utf8"}),
# Add more mappings for other file extensions and loaders as needed
}
def load_single_document(file_path: str) -> Document:
ext = "." + file_path.rsplit(".", 1)[-1]
if ext in LOADER_MAPPING:
loader_class, loader_args = LOADER_MAPPING[ext]
loader = loader_class(file_path, **loader_args)
return loader.load()[0]
raise ValueError(f"Unsupported file extension '{ext}'")
def load_documents(source_dir: str, ignored_files: List[str] = []) -> List[Document]:
"""
Loads all documents from the source documents directory, ignoring specified files
"""
all_files = []
for ext in LOADER_MAPPING:
all_files.extend(
glob.glob(os.path.join(source_dir, f"**/*{ext}"), recursive=True)
)
filtered_files = [file_path for file_path in all_files if file_path not in ignored_files]
with Pool(processes=os.cpu_count()) as pool:
results = []
with tqdm(total=len(filtered_files), desc='Loading new documents', ncols=80) as pbar:
for i, doc in enumerate(pool.imap_unordered(load_single_document, filtered_files)):
results.append(doc)
pbar.update()
return results
def process_documents(ignored_files: List[str] = []) -> List[Document]:
"""
Load documents and split in chunks
"""
print(f"Loading documents from {source_directory}")
documents = load_documents(source_directory, ignored_files)
if not documents:
print("No new documents to load")
exit(0)
print(f"Loaded {len(documents)} new documents from {source_directory}")
text_splitter = RecursiveCharacterTextSplitter(chunk_size=chunk_size, chunk_overlap=chunk_overlap)
texts = text_splitter.split_documents(documents)
print(f"Split into {len(texts)} chunks of text (max. {chunk_size} tokens each)")
return texts
def does_vectorstore_exist(persist_directory: str) -> bool:
"""
Checks if vectorstore exists
"""
if os.path.exists(os.path.join(persist_directory, 'index')):
if os.path.exists(os.path.join(persist_directory, 'chroma-collections.parquet')) and os.path.exists(os.path.join(persist_directory, 'chroma-embeddings.parquet')):
list_index_files = glob.glob(os.path.join(persist_directory, 'index/*.bin'))
list_index_files += glob.glob(os.path.join(persist_directory, 'index/*.pkl'))
# At least 3 documents are needed in a working vectorstore
if len(list_index_files) > 3:
return True
return False
def main():
# Create embeddings
#embeddings = HuggingFaceEmbeddings(model_name=embeddings_model_name)
embeddings = HuggingFaceEmbeddings()
if does_vectorstore_exist(persist_directory):
# Update and store locally vectorstore
print(f"Appending to existing vectorstore at {persist_directory}")
db = Chroma(persist_directory=persist_directory, embedding_function=embeddings, client_settings=CHROMA_SETTINGS)
collection = db.get()
texts = process_documents([metadata['source'] for metadata in collection['metadatas']])
print(f"Creating embeddings. May take some minutes...")
db.add_documents(texts)
else:
# Create and store locally vectorstore
print("Creating new vectorstore")
texts = process_documents()
print(f"Creating embeddings. May take some minutes...")
db = Chroma.from_documents(texts, embeddings, persist_directory=persist_directory, client_settings=CHROMA_SETTINGS)
db.persist()
db = None
print(f"Ingestion complete! You can now run privateGPT.py to query your documents")
if __name__ == "__main__":
main()
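# Illustrative follow-up sketch (not part of this script): once ingestion has finished, the
# persisted vectorstore can be reopened for similarity search with the same settings, e.g.
#
#   embeddings = HuggingFaceEmbeddings()
#   db = Chroma(persist_directory=persist_directory, embedding_function=embeddings,
#               client_settings=CHROMA_SETTINGS)
#   docs = db.similarity_search("What is this document about?", k=4)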
| [] |
2024-01-10 | Lafifi-24/HotelFinderAssistant | tests~automated_tests~test_memory.py | import os
import boto3
from langchain.prompts import (
ChatPromptTemplate,
MessagesPlaceholder,
SystemMessagePromptTemplate,
HumanMessagePromptTemplate,
)
from langchain.memory.chat_message_histories.in_memory import ChatMessageHistory
from langchain.chains import LLMChain
from langchain.chat_models import ChatOpenAI
from langchain.memory import ConversationBufferMemory
from Application.memory import Memory
OPENAI_API_key=os.getenv('OPENAI_API_key')
llm = ChatOpenAI(openai_api_key = OPENAI_API_key)
prompt = ChatPromptTemplate(
messages=[
SystemMessagePromptTemplate.from_template("you are an assistant"),
MessagesPlaceholder(variable_name="chat_history"),
HumanMessagePromptTemplate.from_template("{human_message}"),
]
)
class TestMemory():
def test_creation(self):
memory = Memory(memory_type="DynamoDB")
assert memory.memory_type == "DynamoDB"
assert memory.SessionTable_name == "SessionTable"
dynamodb = boto3.resource("dynamodb")
assert memory.SessionTable_name in [table.name for table in dynamodb.tables.all()]
def test_save_and_load_data(self):
memory = Memory(memory_type="DynamoDB")
messages = ["hello","how are you"]
conversation_memory = ConversationBufferMemory(memory_key="chat_history",return_messages=True, max_history_length=20)
chatbot = LLMChain(llm=llm, prompt=prompt, memory = conversation_memory, verbose=False)
for message in messages:
chatbot.memory.chat_memory.add_ai_message(message)
memory.save_history_in_dynamodb(chatbot, chat_id="1")
chat_memory = memory.load_history_from_dynamodb(chat_id="1")[0]
conversation_memory1 = ConversationBufferMemory(memory_key="chat_history",return_messages=True,chat_memory=chat_memory, max_history_length=20)
chatbot1 = LLMChain(llm=llm, prompt=prompt, memory = conversation_memory1, verbose=False)
assert chatbot1.memory.chat_memory.messages[0].content == "hello"
assert chatbot1.memory.chat_memory.messages[1].content == "how are you"
| [
"{human_message}",
"chat_history",
"you are an assistant"
] |
2024-01-10 | INK-USC/CommonGen | methods~T5~transformer_local~src~transformers~modeling_openai.py | # coding=utf-8
# Copyright 2018 The OpenAI Team Authors and HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch OpenAI GPT model."""
import json
import logging
import math
import os
import torch
import torch.nn as nn
from torch.nn import CrossEntropyLoss
from .activations import gelu_new, swish
from .configuration_openai import OpenAIGPTConfig
from .file_utils import add_start_docstrings, add_start_docstrings_to_callable
from .modeling_utils import Conv1D, PreTrainedModel, SequenceSummary, prune_conv1d_layer
logger = logging.getLogger(__name__)
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_MAP = {
"openai-gpt": "https://s3.amazonaws.com/models.huggingface.co/bert/openai-gpt-pytorch_model.bin"
}
def load_tf_weights_in_openai_gpt(model, config, openai_checkpoint_folder_path):
""" Load tf pre-trained weights in a pytorch model (from NumPy arrays here)
"""
import re
import numpy as np
if ".ckpt" in openai_checkpoint_folder_path:
openai_checkpoint_folder_path = os.path.dirname(openai_checkpoint_folder_path)
logger.info("Loading weights from {}".format(openai_checkpoint_folder_path))
with open(openai_checkpoint_folder_path + "/parameters_names.json", "r", encoding="utf-8") as names_handle:
names = json.load(names_handle)
with open(openai_checkpoint_folder_path + "/params_shapes.json", "r", encoding="utf-8") as shapes_handle:
shapes = json.load(shapes_handle)
offsets = np.cumsum([np.prod(shape) for shape in shapes])
init_params = [np.load(openai_checkpoint_folder_path + "/params_{}.npy".format(n)) for n in range(10)]
init_params = np.split(np.concatenate(init_params, 0), offsets)[:-1]
init_params = [param.reshape(shape) for param, shape in zip(init_params, shapes)]
# This was used when we had a single embedding matrix for positions and tokens
# init_params[0] = np.concatenate([init_params[1], init_params[0]], 0)
# del init_params[1]
init_params = [arr.squeeze() for arr in init_params]
try:
assert model.tokens_embed.weight.shape == init_params[1].shape
assert model.positions_embed.weight.shape == init_params[0].shape
except AssertionError as e:
e.args += (model.tokens_embed.weight.shape, init_params[1].shape)
e.args += (model.positions_embed.weight.shape, init_params[0].shape)
raise
model.tokens_embed.weight.data = torch.from_numpy(init_params[1])
model.positions_embed.weight.data = torch.from_numpy(init_params[0])
names.pop(0)
# Pop position and token embedding arrays
init_params.pop(0)
init_params.pop(0)
for name, array in zip(names, init_params): # names[1:n_transfer], init_params[1:n_transfer]):
name = name[6:] # skip "model/"
assert name[-2:] == ":0"
name = name[:-2]
name = name.split("/")
pointer = model
for m_name in name:
if re.fullmatch(r"[A-Za-z]+\d+", m_name):
scope_names = re.split(r"(\d+)", m_name)
else:
scope_names = [m_name]
if scope_names[0] == "g":
pointer = getattr(pointer, "weight")
elif scope_names[0] == "b":
pointer = getattr(pointer, "bias")
elif scope_names[0] == "w":
pointer = getattr(pointer, "weight")
else:
pointer = getattr(pointer, scope_names[0])
if len(scope_names) >= 2:
num = int(scope_names[1])
pointer = pointer[num]
try:
assert pointer.shape == array.shape
except AssertionError as e:
e.args += (pointer.shape, array.shape)
raise
logger.info("Initialize PyTorch weight {}".format(name))
pointer.data = torch.from_numpy(array)
return model
ACT_FNS = {"relu": nn.ReLU, "swish": swish, "gelu": gelu_new}
class Attention(nn.Module):
def __init__(self, nx, n_ctx, config, scale=False):
super().__init__()
n_state = nx # in Attention: n_state=768 (nx=n_embd)
# [switch nx => n_state from Block to Attention to keep identical to TF implem]
assert n_state % config.n_head == 0
self.register_buffer("bias", torch.tril(torch.ones(n_ctx, n_ctx)).view(1, 1, n_ctx, n_ctx))
self.n_head = config.n_head
self.split_size = n_state
self.scale = scale
self.output_attentions = config.output_attentions
self.c_attn = Conv1D(n_state * 3, nx)
self.c_proj = Conv1D(n_state, nx)
self.attn_dropout = nn.Dropout(config.attn_pdrop)
self.resid_dropout = nn.Dropout(config.resid_pdrop)
self.pruned_heads = set()
def prune_heads(self, heads):
if len(heads) == 0:
return
mask = torch.ones(self.n_head, self.split_size // self.n_head)
heads = set(heads) - self.pruned_heads
for head in heads:
head -= sum(1 if h < head else 0 for h in self.pruned_heads)
mask[head] = 0
mask = mask.view(-1).contiguous().eq(1)
index = torch.arange(len(mask))[mask].long()
index_attn = torch.cat([index, index + self.split_size, index + (2 * self.split_size)])
# Prune conv1d layers
self.c_attn = prune_conv1d_layer(self.c_attn, index_attn, dim=1)
self.c_proj = prune_conv1d_layer(self.c_proj, index, dim=0)
# Update hyper params
self.split_size = (self.split_size // self.n_head) * (self.n_head - len(heads))
self.n_head = self.n_head - len(heads)
self.pruned_heads = self.pruned_heads.union(heads)
def _attn(self, q, k, v, attention_mask=None, head_mask=None):
w = torch.matmul(q, k)
if self.scale:
w = w / math.sqrt(v.size(-1))
# w = w * self.bias + -1e9 * (1 - self.bias) # TF implem method: mask_attn_weights
# XD: self.b may be larger than w, so we need to crop it
b = self.bias[:, :, : w.size(-2), : w.size(-1)]
w = w * b + -1e4 * (1 - b)
if attention_mask is not None:
# Apply the attention mask
w = w + attention_mask
w = nn.Softmax(dim=-1)(w)
w = self.attn_dropout(w)
# Mask heads if we want to
if head_mask is not None:
w = w * head_mask
outputs = [torch.matmul(w, v)]
if self.output_attentions:
outputs.append(w)
return outputs
def merge_heads(self, x):
x = x.permute(0, 2, 1, 3).contiguous()
new_x_shape = x.size()[:-2] + (x.size(-2) * x.size(-1),)
return x.view(*new_x_shape) # in Tensorflow implem: fct merge_states
def split_heads(self, x, k=False):
new_x_shape = x.size()[:-1] + (self.n_head, x.size(-1) // self.n_head)
x = x.view(*new_x_shape) # in Tensorflow implem: fct split_states
if k:
return x.permute(0, 2, 3, 1)
else:
return x.permute(0, 2, 1, 3)
def forward(self, x, attention_mask=None, head_mask=None):
x = self.c_attn(x)
query, key, value = x.split(self.split_size, dim=2)
query = self.split_heads(query)
key = self.split_heads(key, k=True)
value = self.split_heads(value)
attn_outputs = self._attn(query, key, value, attention_mask, head_mask)
a = attn_outputs[0]
a = self.merge_heads(a)
a = self.c_proj(a)
a = self.resid_dropout(a)
outputs = [a] + attn_outputs[1:]
return outputs # a, (attentions)
class MLP(nn.Module):
def __init__(self, n_state, config): # in MLP: n_state=3072 (4 * n_embd)
super().__init__()
nx = config.n_embd
self.c_fc = Conv1D(n_state, nx)
self.c_proj = Conv1D(nx, n_state)
self.act = ACT_FNS[config.afn]
self.dropout = nn.Dropout(config.resid_pdrop)
def forward(self, x):
h = self.act(self.c_fc(x))
h2 = self.c_proj(h)
return self.dropout(h2)
class Block(nn.Module):
def __init__(self, n_ctx, config, scale=False):
super().__init__()
nx = config.n_embd
self.attn = Attention(nx, n_ctx, config, scale)
self.ln_1 = nn.LayerNorm(nx, eps=config.layer_norm_epsilon)
self.mlp = MLP(4 * nx, config)
self.ln_2 = nn.LayerNorm(nx, eps=config.layer_norm_epsilon)
def forward(self, x, attention_mask=None, head_mask=None):
attn_outputs = self.attn(x, attention_mask=attention_mask, head_mask=head_mask)
a = attn_outputs[0]
n = self.ln_1(x + a)
m = self.mlp(n)
h = self.ln_2(n + m)
outputs = [h] + attn_outputs[1:]
return outputs
class OpenAIGPTPreTrainedModel(PreTrainedModel):
""" An abstract class to handle weights initialization and
a simple interface for downloading and loading pretrained models.
"""
config_class = OpenAIGPTConfig
pretrained_model_archive_map = OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_MAP
load_tf_weights = load_tf_weights_in_openai_gpt
base_model_prefix = "transformer"
def _init_weights(self, module):
""" Initialize the weights.
"""
if isinstance(module, (nn.Linear, nn.Embedding, Conv1D)):
# Slightly different from the TF version which uses truncated_normal for initialization
# cf https://github.com/pytorch/pytorch/pull/5617
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
if isinstance(module, (nn.Linear, Conv1D)) and module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, nn.LayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
OPENAI_GPT_START_DOCSTRING = r"""
This model is a PyTorch `torch.nn.Module <https://pytorch.org/docs/stable/nn.html#torch.nn.Module>`_ sub-class.
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general
usage and behavior.
Parameters:
config (:class:`~transformers.OpenAIGPTConfig`): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the configuration.
Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model weights.
"""
OPENAI_GPT_INPUTS_DOCSTRING = r"""
Args:
input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using :class:`transformers.OpenAIGPTTokenizer`.
See :func:`transformers.PreTrainedTokenizer.encode` and
:func:`transformers.PreTrainedTokenizer.encode_plus` for details.
`What are input IDs? <../glossary.html#input-ids>`__
attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):
Mask to avoid performing attention on padding token indices.
Mask values selected in ``[0, 1]``:
``1`` for tokens that are NOT MASKED, ``0`` for MASKED tokens.
`What are attention masks? <../glossary.html#attention-mask>`__
token_type_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):
Segment token indices to indicate first and second portions of the inputs.
Indices are selected in ``[0, 1]``: ``0`` corresponds to a `sentence A` token, ``1``
corresponds to a `sentence B` token
`What are token type IDs? <../glossary.html#token-type-ids>`_
position_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):
Indices of positions of each input sequence tokens in the position embeddings.
Selected in the range ``[0, config.max_position_embeddings - 1]``.
`What are position IDs? <../glossary.html#position-ids>`_
head_mask (:obj:`torch.FloatTensor` of shape :obj:`(num_heads,)` or :obj:`(num_layers, num_heads)`, `optional`, defaults to :obj:`None`):
Mask to nullify selected heads of the self-attention modules.
Mask values selected in ``[0, 1]``:
:obj:`1` indicates the head is **not masked**, :obj:`0` indicates the head is **masked**.
inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`, defaults to :obj:`None`):
Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert `input_ids` indices into associated vectors
than the model's internal embedding lookup matrix.
"""
@add_start_docstrings(
"The bare OpenAI GPT transformer model outputting raw hidden-states without any specific head on top.",
OPENAI_GPT_START_DOCSTRING,
)
class OpenAIGPTModel(OpenAIGPTPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.output_attentions = config.output_attentions
self.output_hidden_states = config.output_hidden_states
self.tokens_embed = nn.Embedding(config.vocab_size, config.n_embd)
self.positions_embed = nn.Embedding(config.n_positions, config.n_embd)
self.drop = nn.Dropout(config.embd_pdrop)
self.h = nn.ModuleList([Block(config.n_ctx, config, scale=True) for _ in range(config.n_layer)])
self.init_weights()
def get_input_embeddings(self):
return self.tokens_embed
def set_input_embeddings(self, new_embeddings):
self.tokens_embed = new_embeddings
def _prune_heads(self, heads_to_prune):
""" Prunes heads of the model.
heads_to_prune: dict of {layer_num: list of heads to prune in this layer}
"""
for layer, heads in heads_to_prune.items():
self.h[layer].attn.prune_heads(heads)
@add_start_docstrings_to_callable(OPENAI_GPT_INPUTS_DOCSTRING)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
):
r"""
Return:
:obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers.OpenAIGPTConfig`) and inputs:
last_hidden_state (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`):
Sequence of hidden-states at the last layer of the model.
hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape
:obj:`(batch_size, num_heads, sequence_length, sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
Examples::
from transformers import OpenAIGPTTokenizer, OpenAIGPTModel
import torch
tokenizer = OpenAIGPTTokenizer.from_pretrained('openai-gpt')
model = OpenAIGPTModel.from_pretrained('openai-gpt')
input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True)).unsqueeze(0) # Batch size 1
outputs = model(input_ids)
last_hidden_states = outputs[0] # The last hidden-state is the first element of the output tuple
"""
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
elif input_ids is not None:
input_shape = input_ids.size()
input_ids = input_ids.view(-1, input_shape[-1])
elif inputs_embeds is not None:
input_shape = inputs_embeds.size()[:-1]
else:
raise ValueError("You have to specify either input_ids or inputs_embeds")
if position_ids is None:
# Code is different from when we had a single embedding matrix for position and token embeddings
device = input_ids.device if input_ids is not None else inputs_embeds.device
position_ids = torch.arange(input_shape[-1], dtype=torch.long, device=device)
position_ids = position_ids.unsqueeze(0).view(-1, input_shape[-1])
# Attention mask.
if attention_mask is not None:
# We create a 3D attention mask from a 2D tensor mask.
# Sizes are [batch_size, 1, 1, to_seq_length]
# So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
# this attention mask is more simple than the triangular masking of causal attention
# used in OpenAI GPT, we just need to prepare the broadcast dimension here.
attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)
# Since attention_mask is 1.0 for positions we want to attend and 0.0 for
# masked positions, this operation will create a tensor which is 0.0 for
# positions we want to attend and -10000.0 for masked positions.
# Since we are adding it to the raw scores before the softmax, this is
# effectively the same as removing these entirely.
attention_mask = attention_mask.to(dtype=next(self.parameters()).dtype) # fp16 compatibility
attention_mask = (1.0 - attention_mask) * -10000.0
# Prepare head mask if needed
head_mask = self.get_head_mask(head_mask, self.config.n_layer)
if inputs_embeds is None:
inputs_embeds = self.tokens_embed(input_ids)
position_embeds = self.positions_embed(position_ids)
if token_type_ids is not None:
token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1))
token_type_embeds = self.tokens_embed(token_type_ids)
else:
token_type_embeds = 0
hidden_states = inputs_embeds + position_embeds + token_type_embeds
hidden_states = self.drop(hidden_states)
output_shape = input_shape + (hidden_states.size(-1),)
all_attentions = ()
all_hidden_states = ()
for i, block in enumerate(self.h):
if self.output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states.view(*output_shape),)
outputs = block(hidden_states, attention_mask, head_mask[i])
hidden_states = outputs[0]
if self.output_attentions:
all_attentions = all_attentions + (outputs[1],)
# Add last layer
if self.output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states.view(*output_shape),)
outputs = (hidden_states.view(*output_shape),)
if self.output_hidden_states:
outputs = outputs + (all_hidden_states,)
if self.output_attentions:
outputs = outputs + (all_attentions,)
return outputs # last hidden state, (all hidden states), (all attentions)
@add_start_docstrings(
"""OpenAI GPT Model transformer with a language modeling head on top
(linear layer with weights tied to the input embeddings). """,
OPENAI_GPT_START_DOCSTRING,
)
class OpenAIGPTLMHeadModel(OpenAIGPTPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.transformer = OpenAIGPTModel(config)
self.lm_head = nn.Linear(config.n_embd, config.vocab_size, bias=False)
self.init_weights()
def get_output_embeddings(self):
return self.lm_head
@add_start_docstrings_to_callable(OPENAI_GPT_INPUTS_DOCSTRING)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
labels=None,
):
r"""
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):
Labels for language modeling.
Note that the labels **are shifted** inside the model, i.e. you can set ``lm_labels = input_ids``
Indices are selected in ``[-100, 0, ..., config.vocab_size]``
All labels set to ``-100`` are ignored (masked), the loss is only
computed for labels in ``[0, ..., config.vocab_size]``
Return:
:obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers.OpenAIGPTConfig`) and inputs:
loss (:obj:`torch.FloatTensor` of shape `(1,)`, `optional`, returned when ``labels`` is provided)
Language modeling loss.
prediction_scores (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, config.vocab_size)`):
Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
past (:obj:`List[torch.FloatTensor]` of length :obj:`config.n_layers` with each tensor of shape :obj:`(2, batch_size, num_heads, sequence_length, embed_size_per_head)`):
Contains pre-computed hidden-states (key and values in the attention blocks).
Can be used (see `past` input) to speed up sequential decoding. The token ids which have their past given to this model
should not be passed as input ids as they have already been computed.
hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape
:obj:`(batch_size, num_heads, sequence_length, sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
Examples::
from transformers import OpenAIGPTTokenizer, OpenAIGPTLMHeadModel
import torch
tokenizer = OpenAIGPTTokenizer.from_pretrained('openai-gpt')
model = OpenAIGPTLMHeadModel.from_pretrained('openai-gpt')
input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True)).unsqueeze(0) # Batch size 1
outputs = model(input_ids, labels=input_ids)
loss, logits = outputs[:2]
"""
transformer_outputs = self.transformer(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
)
hidden_states = transformer_outputs[0]
lm_logits = self.lm_head(hidden_states)
outputs = (lm_logits,) + transformer_outputs[1:]
if labels is not None:
# Shift so that tokens < n predict n
shift_logits = lm_logits[..., :-1, :].contiguous()
shift_labels = labels[..., 1:].contiguous()
# Flatten the tokens
loss_fct = CrossEntropyLoss()
loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1))
outputs = (loss,) + outputs
return outputs # (loss), lm_logits, (all hidden states), (all attentions)
@add_start_docstrings(
"""OpenAI GPT Model transformer with a language modeling and a multiple-choice classification
head on top e.g. for RocStories/SWAG tasks. The two heads are two linear layers.
The language modeling head has its weights tied to the input embeddings,
the classification head takes as input the input of a specified classification token index in the input sequence).
""",
OPENAI_GPT_START_DOCSTRING,
)
class OpenAIGPTDoubleHeadsModel(OpenAIGPTPreTrainedModel):
def __init__(self, config):
super().__init__(config)
config.num_labels = 1
self.transformer = OpenAIGPTModel(config)
self.lm_head = nn.Linear(config.n_embd, config.vocab_size, bias=False)
self.multiple_choice_head = SequenceSummary(config)
self.init_weights()
def get_output_embeddings(self):
return self.lm_head
@add_start_docstrings_to_callable(OPENAI_GPT_INPUTS_DOCSTRING)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
mc_token_ids=None,
lm_labels=None,
mc_labels=None,
):
r"""
mc_token_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, num_choices)`, `optional`, default to index of the last token of the input)
Index of the classification token in each input sequence.
Selected in the range ``[0, input_ids.size(-1) - 1[``.
lm_labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`)
Labels for language modeling.
Note that the labels **are shifted** inside the model, i.e. you can set ``lm_labels = input_ids``
Indices are selected in ``[-100, 0, ..., config.vocab_size]``
All labels set to ``-100`` are ignored (masked), the loss is only
computed for labels in ``[0, ..., config.vocab_size]``
mc_labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size)`, `optional`, defaults to :obj:`None`)
Labels for computing the multiple choice classification loss.
Indices should be in ``[0, ..., num_choices]`` where `num_choices` is the size of the second dimension
of the input tensors. (see `input_ids` above)
Return:
:obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers.OpenAIGPTConfig`) and inputs:
lm_loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned when ``lm_labels`` is provided):
Language modeling loss.
mc_loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned when :obj:`multiple_choice_labels` is provided):
Multiple choice classification loss.
lm_prediction_scores (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, num_choices, sequence_length, config.vocab_size)`):
Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
mc_prediction_scores (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, num_choices)`):
Prediction scores of the multiple choice classification head (scores for each choice before SoftMax).
past (:obj:`List[torch.FloatTensor]` of length :obj:`config.n_layers` with each tensor of shape :obj:`(2, batch_size, num_heads, sequence_length, embed_size_per_head)`):
Contains pre-computed hidden-states (key and values in the attention blocks).
Can be used (see `past` input) to speed up sequential decoding. The token ids which have their past given to this model
should not be passed as input ids as they have already been computed.
hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape
:obj:`(batch_size, num_heads, sequence_length, sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
Examples::
from transformers import OpenAIGPTTokenizer, OpenAIGPTDoubleHeadsModel
import torch
tokenizer = OpenAIGPTTokenizer.from_pretrained('openai-gpt')
model = OpenAIGPTDoubleHeadsModel.from_pretrained('openai-gpt')
tokenizer.add_special_tokens({'cls_token': '[CLS]'}) # Add a [CLS] to the vocabulary (we should train it also!)
model.resize_token_embeddings(len(tokenizer))
choices = ["Hello, my dog is cute [CLS]", "Hello, my cat is cute [CLS]"]
input_ids = torch.tensor([tokenizer.encode(s) for s in choices]).unsqueeze(0) # Batch size 1, 2 choices
mc_token_ids = torch.tensor([input_ids.size(-1)-1, input_ids.size(-1)-1]).unsqueeze(0) # Batch size 1
outputs = model(input_ids, mc_token_ids=mc_token_ids)
lm_prediction_scores, mc_prediction_scores = outputs[:2]
"""
transformer_outputs = self.transformer(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
)
hidden_states = transformer_outputs[0]
lm_logits = self.lm_head(hidden_states)
mc_logits = self.multiple_choice_head(hidden_states, mc_token_ids).squeeze(-1)
outputs = (lm_logits, mc_logits) + transformer_outputs[1:]
if mc_labels is not None:
loss_fct = CrossEntropyLoss()
loss = loss_fct(mc_logits.view(-1, mc_logits.size(-1)), mc_labels.view(-1))
outputs = (loss,) + outputs
if lm_labels is not None:
shift_logits = lm_logits[..., :-1, :].contiguous()
shift_labels = lm_labels[..., 1:].contiguous()
loss_fct = CrossEntropyLoss()
loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1))
outputs = (loss,) + outputs
return outputs # (lm loss), (mc loss), lm logits, mc logits, (all hidden_states), (attentions)
| [] |
2024-01-10 | INK-USC/CommonGen | methods~T5~transformer_local~src~transformers~modeling_tf_openai.py | # coding=utf-8
# Copyright 2018 The OpenAI Team Authors and HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" TF 2.0 OpenAI GPT model."""
import logging
import numpy as np
import tensorflow as tf
from .configuration_openai import OpenAIGPTConfig
from .file_utils import add_start_docstrings, add_start_docstrings_to_callable
from .modeling_tf_utils import (
TFConv1D,
TFPreTrainedModel,
TFSequenceSummary,
TFSharedEmbeddings,
get_initializer,
shape_list,
)
from .tokenization_utils import BatchEncoding
logger = logging.getLogger(__name__)
TF_OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_MAP = {
"openai-gpt": "https://s3.amazonaws.com/models.huggingface.co/bert/openai-gpt-tf_model.h5"
}
def gelu(x):
"""Gaussian Error Linear Unit.
This is a smoother version of the RELU.
Original paper: https://arxiv.org/abs/1606.08415
Args:
x: float Tensor to perform activation.
Returns:
`x` with the GELU activation applied.
"""
cdf = 0.5 * (1.0 + tf.tanh((np.sqrt(2 / np.pi) * (x + 0.044715 * tf.pow(x, 3)))))
return x * cdf
def swish(x):
return x * tf.math.sigmoid(x)
ACT_FNS = {
"gelu": tf.keras.layers.Activation(gelu),
"relu": tf.keras.activations.relu,
"swish": tf.keras.layers.Activation(swish),
}
class TFAttention(tf.keras.layers.Layer):
def __init__(self, nx, n_ctx, config, scale=False, **kwargs):
super().__init__(**kwargs)
self.output_attentions = config.output_attentions
n_state = nx # in Attention: n_state=768 (nx=n_embd)
# [switch nx => n_state from Block to Attention to keep identical to TF implem]
assert n_state % config.n_head == 0
self.n_ctx = n_ctx
self.n_head = config.n_head
self.split_size = n_state
self.scale = scale
self.c_attn = TFConv1D(n_state * 3, nx, initializer_range=config.initializer_range, name="c_attn")
self.c_proj = TFConv1D(n_state, nx, initializer_range=config.initializer_range, name="c_proj")
self.attn_dropout = tf.keras.layers.Dropout(config.attn_pdrop)
self.resid_dropout = tf.keras.layers.Dropout(config.resid_pdrop)
self.pruned_heads = set()
def prune_heads(self, heads):
pass
@staticmethod
def causal_attention_mask(nd, ns, dtype):
"""1's in the lower triangle, counting from the lower right corner.
Same as tf.matrix_band_part(tf.ones([nd, ns]), -1, ns-nd), but doesn't produce garbage on TPUs.
"""
i = tf.range(nd)[:, None]
j = tf.range(ns)
m = i >= j - ns + nd
return tf.cast(m, dtype)
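# For example (illustrative): causal_attention_mask(3, 3, tf.float32) evaluates to
#   [[1., 0., 0.],
#    [1., 1., 0.],
#    [1., 1., 1.]]
# i.e. position i may attend to positions j <= i.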
def _attn(self, inputs, training=False):
q, k, v, attention_mask, head_mask = inputs
# q, k, v have shape [batch, heads, sequence, features]
w = tf.matmul(q, k, transpose_b=True)
if self.scale:
dk = tf.cast(shape_list(k)[-1], tf.float32) # scale attention_scores
w = w / tf.math.sqrt(dk)
# w has shape [batch, heads, dst_sequence, src_sequence], where information flows from src to dst.
_, _, nd, ns = shape_list(w)
b = self.causal_attention_mask(nd, ns, dtype=w.dtype)
b = tf.reshape(b, [1, 1, nd, ns])
w = w * b - 1e4 * (1 - b)
if attention_mask is not None:
# Apply the attention mask
w = w + attention_mask
w = tf.nn.softmax(w, axis=-1)
w = self.attn_dropout(w, training=training)
# Mask heads if we want to
if head_mask is not None:
w = w * head_mask
outputs = [tf.matmul(w, v)]
if self.output_attentions:
outputs.append(w)
return outputs
def merge_heads(self, x):
x = tf.transpose(x, [0, 2, 1, 3])
x_shape = shape_list(x)
new_x_shape = x_shape[:-2] + [x_shape[-2] * x_shape[-1]]
return tf.reshape(x, new_x_shape)
def split_heads(self, x):
x_shape = shape_list(x)
new_x_shape = x_shape[:-1] + [self.n_head, x_shape[-1] // self.n_head]
x = tf.reshape(x, new_x_shape)
return tf.transpose(x, (0, 2, 1, 3)) # (batch, head, seq_length, head_features)
def call(self, inputs, training=False):
x, attention_mask, head_mask = inputs
x = self.c_attn(x)
query, key, value = tf.split(x, 3, axis=2)
query = self.split_heads(query)
key = self.split_heads(key)
value = self.split_heads(value)
attn_outputs = self._attn([query, key, value, attention_mask, head_mask], training=training)
a = attn_outputs[0]
a = self.merge_heads(a)
a = self.c_proj(a)
a = self.resid_dropout(a, training=training)
outputs = [a] + attn_outputs[1:]
return outputs # a, (attentions)
class TFMLP(tf.keras.layers.Layer):
def __init__(self, n_state, config, **kwargs):
super().__init__(**kwargs)
nx = config.n_embd
self.c_fc = TFConv1D(n_state, nx, initializer_range=config.initializer_range, name="c_fc")
self.c_proj = TFConv1D(nx, n_state, initializer_range=config.initializer_range, name="c_proj")
self.act = gelu
self.dropout = tf.keras.layers.Dropout(config.resid_pdrop)
def call(self, x, training=False):
h = self.act(self.c_fc(x))
h2 = self.c_proj(h)
h2 = self.dropout(h2, training=training)
return h2
class TFBlock(tf.keras.layers.Layer):
def __init__(self, n_ctx, config, scale=False, **kwargs):
super().__init__(**kwargs)
nx = config.n_embd
self.attn = TFAttention(nx, n_ctx, config, scale, name="attn")
self.ln_1 = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_epsilon, name="ln_1")
self.mlp = TFMLP(4 * nx, config, name="mlp")
self.ln_2 = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_epsilon, name="ln_2")
def call(self, inputs, training=False):
x, attention_mask, head_mask = inputs
output_attn = self.attn([x, attention_mask, head_mask], training=training)
a = output_attn[0] # output_attn: a, (attentions)
n = self.ln_1(x + a)
m = self.mlp(n, training=training)
h = self.ln_2(n + m)
outputs = [h] + output_attn[1:]
return outputs # x, (attentions)
class TFOpenAIGPTMainLayer(tf.keras.layers.Layer):
def __init__(self, config, *inputs, **kwargs):
super().__init__(*inputs, **kwargs)
self.output_hidden_states = config.output_hidden_states
self.output_attentions = config.output_attentions
self.num_hidden_layers = config.n_layer
self.vocab_size = config.vocab_size
self.n_embd = config.n_embd
self.tokens_embed = TFSharedEmbeddings(
config.vocab_size, config.n_embd, initializer_range=config.initializer_range, name="tokens_embed"
)
self.positions_embed = tf.keras.layers.Embedding(
config.n_positions,
config.n_embd,
embeddings_initializer=get_initializer(config.initializer_range),
name="positions_embed",
)
self.drop = tf.keras.layers.Dropout(config.embd_pdrop)
self.h = [TFBlock(config.n_ctx, config, scale=True, name="h_._{}".format(i)) for i in range(config.n_layer)]
def get_input_embeddings(self):
return self.tokens_embed
def _resize_token_embeddings(self, new_num_tokens):
raise NotImplementedError
def _prune_heads(self, heads_to_prune):
""" Prunes heads of the model.
heads_to_prune: dict of {layer_num: list of heads to prune in this layer}
"""
raise NotImplementedError
def call(
self,
inputs,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
training=False,
):
if isinstance(inputs, (tuple, list)):
input_ids = inputs[0]
attention_mask = inputs[1] if len(inputs) > 1 else attention_mask
token_type_ids = inputs[2] if len(inputs) > 2 else token_type_ids
position_ids = inputs[3] if len(inputs) > 3 else position_ids
head_mask = inputs[4] if len(inputs) > 4 else head_mask
inputs_embeds = inputs[5] if len(inputs) > 5 else inputs_embeds
assert len(inputs) <= 6, "Too many inputs."
elif isinstance(inputs, (dict, BatchEncoding)):
input_ids = inputs.get("input_ids")
attention_mask = inputs.get("attention_mask", attention_mask)
token_type_ids = inputs.get("token_type_ids", token_type_ids)
position_ids = inputs.get("position_ids", position_ids)
head_mask = inputs.get("head_mask", head_mask)
inputs_embeds = inputs.get("inputs_embeds", inputs_embeds)
assert len(inputs) <= 6, "Too many inputs."
else:
input_ids = inputs
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
elif input_ids is not None:
input_shape = shape_list(input_ids)
input_ids = tf.reshape(input_ids, [-1, input_shape[-1]])
elif inputs_embeds is not None:
input_shape = shape_list(inputs_embeds)[:-1]
else:
raise ValueError("You have to specify either input_ids or inputs_embeds")
if position_ids is None:
position_ids = tf.range(input_shape[-1], dtype=tf.int32)[tf.newaxis, :]
if attention_mask is not None:
# We create a 3D attention mask from a 2D tensor mask.
# Sizes are [batch_size, 1, 1, to_seq_length]
# So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
# this attention mask is more simple than the triangular masking of causal attention
# used in OpenAI GPT, we just need to prepare the broadcast dimension here.
attention_mask = attention_mask[:, tf.newaxis, tf.newaxis, :]
# Since attention_mask is 1.0 for positions we want to attend and 0.0 for
# masked positions, this operation will create a tensor which is 0.0 for
# positions we want to attend and -10000.0 for masked positions.
# Since we are adding it to the raw scores before the softmax, this is
# effectively the same as removing these entirely.
attention_mask = tf.cast(attention_mask, tf.float32)
attention_mask = (1.0 - attention_mask) * -10000.0
else:
attention_mask = None
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
if head_mask is not None:
raise NotImplementedError
else:
head_mask = [None] * self.num_hidden_layers
# head_mask = tf.constant([0] * self.num_hidden_layers)
position_ids = tf.reshape(position_ids, [-1, shape_list(position_ids)[-1]])
if inputs_embeds is None:
inputs_embeds = self.tokens_embed(input_ids, mode="embedding")
position_embeds = self.positions_embed(position_ids)
if token_type_ids is not None:
token_type_ids = tf.reshape(token_type_ids, [-1, shape_list(token_type_ids)[-1]])
token_type_embeds = self.tokens_embed(token_type_ids, mode="embedding")
else:
token_type_embeds = 0
hidden_states = inputs_embeds + position_embeds + token_type_embeds
hidden_states = self.drop(hidden_states, training=training)
output_shape = input_shape + [shape_list(hidden_states)[-1]]
all_attentions = []
all_hidden_states = ()
for i, block in enumerate(self.h):
if self.output_hidden_states:
all_hidden_states = all_hidden_states + (tf.reshape(hidden_states, output_shape),)
outputs = block([hidden_states, attention_mask, head_mask[i]], training=training)
hidden_states = outputs[0]
if self.output_attentions:
all_attentions.append(outputs[1])
hidden_states = tf.reshape(hidden_states, output_shape)
# Add last hidden state
if self.output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
outputs = (hidden_states,)
if self.output_hidden_states:
outputs = outputs + (all_hidden_states,)
if self.output_attentions:
# let the number of heads free (-1) so we can extract attention even after head pruning
attention_output_shape = input_shape[:-1] + [-1] + shape_list(all_attentions[0])[-2:]
all_attentions = tuple(tf.reshape(t, attention_output_shape) for t in all_attentions)
outputs = outputs + (all_attentions,)
return outputs # last hidden state, (all hidden_states), (attentions)
class TFOpenAIGPTPreTrainedModel(TFPreTrainedModel):
""" An abstract class to handle weights initialization and
a simple interface for downloading and loading pretrained models.
"""
config_class = OpenAIGPTConfig
pretrained_model_archive_map = TF_OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_MAP
base_model_prefix = "transformer"
OPENAI_GPT_START_DOCSTRING = r"""
.. note::
    TF 2.0 models accept two formats as inputs:
- having all inputs as keyword arguments (like PyTorch models), or
- having all inputs as a list, tuple or dict in the first positional arguments.
    This second option is useful when using the :obj:`tf.keras.Model.fit()` method, which currently requires having
    all the tensors in the first argument of the model call function: :obj:`model(inputs)`.
    If you choose this second option, there are three possibilities you can use to gather all the input Tensors
    in the first positional argument:
    - a single Tensor with input_ids only and nothing else: :obj:`model(input_ids)`
- a list of varying length with one or several input Tensors IN THE ORDER given in the docstring:
:obj:`model([input_ids, attention_mask])` or :obj:`model([input_ids, attention_mask, token_type_ids])`
- a dictionary with one or several input Tensors associated to the input names given in the docstring:
:obj:`model({'input_ids': input_ids, 'token_type_ids': token_type_ids})`
Parameters:
config (:class:`~transformers.OpenAIGPTConfig`): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the configuration.
Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model weights.
"""
OPENAI_GPT_INPUTS_DOCSTRING = r"""
Args:
input_ids (:obj:`Numpy array` or :obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using :class:`transformers.GPT2Tokenizer`.
See :func:`transformers.PreTrainedTokenizer.encode` and
:func:`transformers.PreTrainedTokenizer.encode_plus` for details.
`What are input IDs? <../glossary.html#input-ids>`__
attention_mask (:obj:`tf.Tensor` or :obj:`Numpy array` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):
Mask to avoid performing attention on padding token indices.
Mask values selected in ``[0, 1]``:
``1`` for tokens that are NOT MASKED, ``0`` for MASKED tokens.
`What are attention masks? <../glossary.html#attention-mask>`__
token_type_ids (:obj:`tf.Tensor` or :obj:`Numpy array` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):
Segment token indices to indicate first and second portions of the inputs.
Indices are selected in ``[0, 1]``: ``0`` corresponds to a `sentence A` token, ``1``
corresponds to a `sentence B` token
`What are token type IDs? <../glossary.html#token-type-ids>`_
position_ids (:obj:`tf.Tensor` or :obj:`Numpy array` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):
Indices of positions of each input sequence tokens in the position embeddings.
Selected in the range ``[0, config.max_position_embeddings - 1]``.
`What are position IDs? <../glossary.html#position-ids>`_
head_mask (:obj:`tf.Tensor` or :obj:`Numpy array` of shape :obj:`(num_heads,)` or :obj:`(num_layers, num_heads)`, `optional`, defaults to :obj:`None`):
Mask to nullify selected heads of the self-attention modules.
Mask values selected in ``[0, 1]``:
:obj:`1` indicates the head is **not masked**, :obj:`0` indicates the head is **masked**.
        inputs_embeds (:obj:`tf.Tensor` or :obj:`Numpy array` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`, defaults to :obj:`None`):
Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert `input_ids` indices into associated vectors
than the model's internal embedding lookup matrix.
training (:obj:`boolean`, `optional`, defaults to :obj:`False`):
Whether to activate dropout modules (if set to :obj:`True`) during training or to de-activate them
(if set to :obj:`False`) for evaluation.
"""
@add_start_docstrings(
"The bare OpenAI GPT transformer model outputing raw hidden-states without any specific head on top.",
OPENAI_GPT_START_DOCSTRING,
)
class TFOpenAIGPTModel(TFOpenAIGPTPreTrainedModel):
def __init__(self, config, *inputs, **kwargs):
super().__init__(config, *inputs, **kwargs)
self.transformer = TFOpenAIGPTMainLayer(config, name="transformer")
@add_start_docstrings_to_callable(OPENAI_GPT_INPUTS_DOCSTRING)
def call(self, inputs, **kwargs):
r"""
Return:
:obj:`tuple(tf.Tensor)` comprising various elements depending on the configuration (:class:`~transformers.OpenAIGPTConfig`) and inputs:
last_hidden_state (:obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`):
Sequence of hidden-states at the last layer of the model.
hidden_states (:obj:`tuple(tf.Tensor)` `optional`, returned when ``config.output_hidden_states=True``):
Tuple of :obj:`tf.Tensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``config.output_attentions=True``):
Tuple of :obj:`tf.Tensor` (one for each layer) of shape
:obj:`(batch_size, num_heads, sequence_length, sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
Examples::
import tensorflow as tf
from transformers import OpenAIGPTTokenizer, TFOpenAIGPTModel
tokenizer = OpenAIGPTTokenizer.from_pretrained('openai-gpt')
model = TFOpenAIGPTModel.from_pretrained('openai-gpt')
input_ids = tf.constant(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True))[None, :] # Batch size 1
outputs = model(input_ids)
last_hidden_states = outputs[0] # The last hidden-state is the first element of the output tuple
"""
outputs = self.transformer(inputs, **kwargs)
return outputs
@add_start_docstrings(
"""OpenAI GPT Model transformer with a language modeling head on top
(linear layer with weights tied to the input embeddings). """,
OPENAI_GPT_START_DOCSTRING,
)
class TFOpenAIGPTLMHeadModel(TFOpenAIGPTPreTrainedModel):
def __init__(self, config, *inputs, **kwargs):
super().__init__(config, *inputs, **kwargs)
self.transformer = TFOpenAIGPTMainLayer(config, name="transformer")
def get_output_embeddings(self):
return self.transformer.tokens_embed
@add_start_docstrings_to_callable(OPENAI_GPT_INPUTS_DOCSTRING)
def call(self, inputs, **kwargs):
r"""
Return:
:obj:`tuple(tf.Tensor)` comprising various elements depending on the configuration (:class:`~transformers.OpenAIGPTConfig`) and inputs:
prediction_scores (:obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length, config.vocab_size)`):
Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
hidden_states (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``config.output_hidden_states=True``):
Tuple of :obj:`tf.Tensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``config.output_attentions=True``):
Tuple of :obj:`tf.Tensor` (one for each layer) of shape
:obj:`(batch_size, num_heads, sequence_length, sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
Examples::
import tensorflow as tf
from transformers import OpenAIGPTTokenizer, TFOpenAIGPTLMHeadModel
tokenizer = OpenAIGPTTokenizer.from_pretrained('openai-gpt')
model = TFOpenAIGPTLMHeadModel.from_pretrained('openai-gpt')
input_ids = tf.constant(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True))[None, :] # Batch size 1
outputs = model(input_ids)
logits = outputs[0]
"""
transformer_outputs = self.transformer(inputs, **kwargs)
hidden_states = transformer_outputs[0]
lm_logits = self.transformer.tokens_embed(hidden_states, mode="linear")
outputs = (lm_logits,) + transformer_outputs[1:]
return outputs # lm_logits, (all hidden_states), (attentions)
@add_start_docstrings(
"""OpenAI GPT Model transformer with a language modeling and a multiple-choice classification
head on top e.g. for RocStories/SWAG tasks. The two heads are two linear layers.
The language modeling head has its weights tied to the input embeddings,
    the classification head takes as input the hidden state at a specified classification token index in the input sequence.
""",
OPENAI_GPT_START_DOCSTRING,
)
class TFOpenAIGPTDoubleHeadsModel(TFOpenAIGPTPreTrainedModel):
def __init__(self, config, *inputs, **kwargs):
super().__init__(config, *inputs, **kwargs)
config.num_labels = 1
self.transformer = TFOpenAIGPTMainLayer(config, name="transformer")
self.multiple_choice_head = TFSequenceSummary(
config, initializer_range=config.initializer_range, name="multiple_choice_head"
)
def get_output_embeddings(self):
return self.transformer.tokens_embed
@add_start_docstrings_to_callable(OPENAI_GPT_INPUTS_DOCSTRING)
def call(
self,
inputs,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
mc_token_ids=None,
training=False,
):
r"""
mc_token_ids (:obj:`tf.Tensor` or :obj:`Numpy array` of shape :obj:`(batch_size, num_choices)`, `optional`, default to index of the last token of the input)
Index of the classification token in each input sequence.
Selected in the range ``[0, input_ids.size(-1) - 1[``.
Return:
:obj:`tuple(tf.Tensor)` comprising various elements depending on the configuration (:class:`~transformers.OpenAIGPTConfig`) and inputs:
lm_prediction_scores (:obj:`tf.Tensor` of shape :obj:`(batch_size, num_choices, sequence_length, config.vocab_size)`):
Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
mc_prediction_scores (:obj:`tf.Tensor` of shape :obj:`(batch_size, num_choices)`):
Prediction scores of the multiple choice classification head (scores for each choice before SoftMax).
past (:obj:`List[tf.Tensor]` of length :obj:`config.n_layers` with each tensor of shape :obj:`(2, batch_size, num_heads, sequence_length, embed_size_per_head)`):
Contains pre-computed hidden-states (key and values in the attention blocks).
Can be used (see `past` input) to speed up sequential decoding. The token ids which have their past given to this model
should not be passed as input ids as they have already been computed.
hidden_states (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``config.output_hidden_states=True``):
Tuple of :obj:`tf.Tensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``config.output_attentions=True``):
Tuple of :obj:`tf.Tensor` (one for each layer) of shape
:obj:`(batch_size, num_heads, sequence_length, sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
Examples::
# For example purposes. Not runnable.
import tensorflow as tf
from transformers import OpenAIGPTTokenizer, TFOpenAIGPTDoubleHeadsModel
tokenizer = OpenAIGPTTokenizer.from_pretrained('openai-gpt')
model = TFOpenAIGPTDoubleHeadsModel.from_pretrained('openai-gpt')
# Add a [CLS] to the vocabulary (we should train it also!)
# This option is currently not implemented in TF 2.0
raise NotImplementedError
tokenizer.add_special_tokens({'cls_token': '[CLS]'})
model.resize_token_embeddings(len(tokenizer)) # Update the model embeddings with the new vocabulary size
        print(tokenizer.cls_token_id, len(tokenizer))  # The newly added token is the last token of the vocabulary
choices = ["Hello, my dog is cute [CLS]", "Hello, my cat is cute [CLS]"]
input_ids = tf.constant([tokenizer.encode(s) for s in choices])[None, :] # Batch size 1, 2 choices
mc_token_ids = tf.constant([input_ids.size(-1), input_ids.size(-1)])[None, :] # Batch size 1
outputs = model(input_ids, mc_token_ids=mc_token_ids)
lm_prediction_scores, mc_prediction_scores = outputs[:2]
"""
if isinstance(inputs, (tuple, list)):
input_ids = inputs[0]
attention_mask = inputs[1] if len(inputs) > 1 else attention_mask
token_type_ids = inputs[2] if len(inputs) > 2 else token_type_ids
position_ids = inputs[3] if len(inputs) > 3 else position_ids
head_mask = inputs[4] if len(inputs) > 4 else head_mask
inputs_embeds = inputs[5] if len(inputs) > 5 else inputs_embeds
mc_token_ids = inputs[6] if len(inputs) > 6 else mc_token_ids
assert len(inputs) <= 7, "Too many inputs."
elif isinstance(inputs, dict):
input_ids = inputs.get("input_ids")
attention_mask = inputs.get("attention_mask", attention_mask)
token_type_ids = inputs.get("token_type_ids", token_type_ids)
position_ids = inputs.get("position_ids", position_ids)
head_mask = inputs.get("head_mask", head_mask)
inputs_embeds = inputs.get("inputs_embeds", inputs_embeds)
mc_token_ids = inputs.get("mc_token_ids", mc_token_ids)
assert len(inputs) <= 7, "Too many inputs."
else:
input_ids = inputs
if input_ids is not None:
input_shapes = shape_list(input_ids)
else:
input_shapes = shape_list(inputs_embeds)[:-1]
seq_length = input_shapes[-1]
flat_input_ids = tf.reshape(input_ids, (-1, seq_length)) if input_ids is not None else None
flat_attention_mask = tf.reshape(attention_mask, (-1, seq_length)) if attention_mask is not None else None
flat_token_type_ids = tf.reshape(token_type_ids, (-1, seq_length)) if token_type_ids is not None else None
flat_position_ids = tf.reshape(position_ids, (-1, seq_length)) if position_ids is not None else None
flat_inputs = [
flat_input_ids,
flat_attention_mask,
flat_token_type_ids,
flat_position_ids,
head_mask,
inputs_embeds,
]
transformer_outputs = self.transformer(flat_inputs, training=training)
hidden_states = transformer_outputs[0]
hidden_states = tf.reshape(hidden_states, input_shapes + shape_list(hidden_states)[-1:])
lm_logits = self.transformer.tokens_embed(hidden_states, mode="linear")
mc_logits = self.multiple_choice_head([hidden_states, mc_token_ids], training=training)
mc_logits = tf.squeeze(mc_logits, axis=-1)
outputs = (lm_logits, mc_logits) + transformer_outputs[1:]
return outputs # lm logits, mc logits, (all hidden_states), (attentions)
| [] |
2024-01-10 | anshss/constellation | python-apis~videogen.py | from flask import Flask, request, jsonify
from flask_cors import CORS, cross_origin
import replicate
import os
from dotenv import load_dotenv
from openai import OpenAI
import boto3
from io import BytesIO
from urllib.parse import quote, urlparse
from pydub import AudioSegment
from ibm_watson import TextToSpeechV1
from ibm_cloud_sdk_core.authenticators import IAMAuthenticator
app = Flask(__name__)
CORS(app, resources={r"/*"})
app.config['CORS_HEADERS'] = 'Content-Type'
# Load environment variables
load_dotenv()
# OpenAI setup
api_key = os.environ.get("OPENAI_API_KEY")
client = OpenAI()
# IBM Watson setup
apikey = os.environ.get("watson_apikey")
url = os.environ.get("watson_url")
authenticator = IAMAuthenticator(apikey)
tts = TextToSpeechV1(authenticator=authenticator)
tts.set_service_url(url)
# AWS S3 setup
s3 = boto3.client(
's3',
aws_access_key_id=os.environ.get("AWS_ACCESS_KEY_ID"),
aws_secret_access_key=os.environ.get("AWS_SECRET_ACCESS_KEY"),
)
s3_bucket_name = "bucketforadgen"
def url_to_uri(url):
parsed_url = urlparse(url)
uri = parsed_url.scheme + "://" + parsed_url.netloc + quote(parsed_url.path)
if parsed_url.query:
uri += quote("?" + parsed_url.query)
if parsed_url.fragment:
uri += quote("#" + parsed_url.fragment)
return uri
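# Usage sketch (illustrative, hypothetical URL): url_to_uri("https://example.com/my file.png")
# returns "https://example.com/my%20file.png"; the path is percent-encoded so replicate can fetch it.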
def audiogen(product_name, product_description, model_gender):
script = generate_script(product_name, product_description)
try:
s3_audio = None
if model_gender == "female":
with open('./generated-audio-female.mp3', 'wb') as audio_file:
response = tts.synthesize(script, accept='audio/mp3', voice='en-US_AllisonV3Voice').get_result()
generated_audio = response.content
audio_file.write(generated_audio)
elif model_gender == "male":
with open('./generated-audio-male.mp3', 'wb') as audio_file:
response = tts.synthesize(script, accept='audio/mp3', voice='en-US_HenryV3Voice').get_result()
generated_audio = response.content
if response.status_code != 200:
print(f"Error synthesizing audio. Status code: {response.status_code}")
audio_file.write(generated_audio)
# Convert the generated audio from MP3 to WAV
audio = AudioSegment.from_mp3(f'./generated-audio-{model_gender}.mp3')
audio.export(f'./generated-audio-{model_gender}.wav', format='wav')
# Read the converted WAV file
with open(f'./generated-audio-{model_gender}.wav', 'rb') as wav_file:
audio_bytes = BytesIO(wav_file.read())
s3_audio = upload_audio_to_s3(audio_bytes, product_name)
except Exception as e:
print(f"Error generating or uploading audio: {e}")
return s3_audio
def upload_audio_to_s3(audio_bytes, product_name):
try:
product_name_cleaned = product_name.replace(" ", "_")
s3_bucket_name = 'bucketforadgen'
s3_key = f"{product_name_cleaned}_generated_audio.mp3"
# Upload the audio to S3
s3.put_object(Body=audio_bytes, Bucket=s3_bucket_name, Key=s3_key, ContentType='audio/mpeg')
audio_public_url = f'https://{s3_bucket_name}.s3.amazonaws.com/{s3_key}'
return audio_public_url
except Exception as e:
print(f"Error uploading audio to S3: {e}")
raise e
def generate_script(product_name, product_description):
script_prompt = f"Create a short catchy advertisement script for a product named {product_name}. Description: {product_description}"
script_response = client.completions.create(
model="text-davinci-003",
prompt=script_prompt,
max_tokens=50
)
    script = script_response.choices[0].text.strip().replace('"', '')
    # Flatten the script to a single line so the TTS narration has no stray line breaks.
    script = script.replace('\n', ' ').strip()
print(f"Script: {script}")
return script
@app.route("/generate-vid/", methods=["POST"])
@cross_origin(allow_headers=['Content-Type'])
def generate_video():
try:
data = request.get_json()
model_image = data.get('model_img')
product_name = data.get('product_name')
product_description = data.get('product_description')
model_gender = data.get('model_gender')
print(f"Model Gender: {model_gender}")
# Use audiogen to get the S3 audio URL
model_voice = audiogen(product_name, product_description, model_gender)
model_image_encoded = url_to_uri(model_image)
print(f"Model Image URI: {model_image_encoded}")
# Pass the S3 audio URL to replicate.run
output = replicate.run(
"cjwbw/sadtalker:3aa3dac9353cc4d6bd62a8f95957bd844003b401ca4e4a9b33baa574c549d376",
input={
"still": True,
"enhancer": "gfpgan",
"preprocess": "full",
"driven_audio": model_voice, # Pass the S3 audio URL here
"source_image": model_image_encoded
}
)
print(output)
return jsonify({"result": output}), 200
except Exception as e:
print(f"Error generating video: {e}")
return jsonify({"error": str(e)}), 500
if __name__ == "__main__":
app.run(debug=True) | [
"Create a short catchy advertisement script for a product named PLACEHOLDER. Description: PLACEHOLDER"
] |
2024-01-10 | anshss/constellation | python-apis~modv2.py | from openai import OpenAI
from flask_cors import CORS
import requests
import os
import boto3
from dotenv import load_dotenv
from io import BytesIO
from PIL import Image
load_dotenv()
s3 = boto3.client(
's3',
aws_access_key_id=os.environ.get("AWS_ACCESS_KEY_ID"),
aws_secret_access_key=os.environ.get("AWS_SECRET_ACCESS_KEY"),
)
client = OpenAI()
def generate_model_img(model_description):
try:
dalle_api_prompt = f"Generate a realistic image of a model captured with a Nikon D850 and a Nikon AF-S NIKKOR 70-200mm f/2.8E FL ED VR lens, lit with high-key lighting to create a soft and ethereal feel, with a shallow depth of field --ar 2:3- with the following attributes: {model_description}"
dalle_response = client.images.generate(
model="dall-e-3",
prompt=dalle_api_prompt,
size="1024x1024",
quality="hd",
n=1,
)
image_content = BytesIO(requests.get(dalle_response.data[0].url).content)
upload_to_s3(image_content, model_description)
except Exception as e:
print(f"Error: {e}")
def upload_to_s3(image_content, model_description):
try:
model_description_cleaned = model_description.replace(" ", "_")
s3_bucket_name = 'bucketforadgen'
s3_key = f"{model_description_cleaned}_model_img.png"
s3.put_object(Body=image_content.getvalue(), Bucket=s3_bucket_name, Key=s3_key,ContentType='image/png')
        s3_public_url = f'https://{s3_bucket_name}.s3.amazonaws.com/{s3_key}'
print(f"Public URL for the image: {s3_public_url}")
return s3_public_url
except Exception as e:
print(f"Error uploading to S3: {e}")
raise e
if __name__ == "__main__":
model_description = "realistic "
generate_model_img(model_description) | [
"Generate a realistic image of a model captured with a Nikon D850 and a Nikon AF-S NIKKOR 70-200mm f/2.8E FL ED VR lens, lit with high-key lighting to create a soft and ethereal feel, with a shallow depth of field --ar 2:3- with the following attributes: realistic "
] |
2024-01-10 | milk333445/Automatic_code_writing_assistant | pages~3_CodeReview.py | import streamlit as st
from langchain import OpenAI
from langchain import PromptTemplate
from langchain.chains.summarize import load_summarize_chain
from langchain.text_splitter import RecursiveCharacterTextSplitter
from serpapi import GoogleSearch
from langchain.document_loaders import WebBaseLoader
import re
from langchain.vectorstores import Pinecone
from langchain.embeddings.openai import OpenAIEmbeddings
import pinecone
from langchain.schema import Document
from langchain.chat_models import ChatOpenAI
from langchain.vectorstores import FAISS
import numpy as np
from langchain import PromptTemplate, OpenAI, LLMChain
import os
import subprocess
import shutil
## Helper functions
import ast
#parser
class CodeParser:
@classmethod
def parse_block(cls, block: str, text: str) -> str:
blocks = cls.parse_blocks(text)
for k, v in blocks.items():
if block in k:
return v
return ""
@classmethod
def parse_blocks(cls, text: str):
        # Split the text into blocks
blocks = text.split('##')
        # Build a dict that stores each block's title and content
block_dict = {}
for block in blocks:
            # Only process non-empty blocks
if block.strip() != "":
block_title, block_content = block.split('\n', 1)
block_dict[block_title.strip()] = block_content.strip()
return block_dict
@classmethod
def parse_code(cls, block: str, text: str, lang: str = "") -> str:
if block:
text = cls.parse_block(block, text)
pattern = rf'```{lang}.*?\s+(.*?)```'
match = re.search(pattern, text, re.DOTALL)
if match:
code = match.group(1)
else:
raise Exception(f"Error parsing code block: {block}")
return code
@classmethod
def parse_str(cls, block: str, text: str, lang: str = ""):
code = cls.parse_code(block, text, lang)
code = code.split("=")[-1]
code = code.strip().strip("'").strip("\"")
return code
@classmethod
def parse_file_list(cls, block: str, text: str, lang: str = "")->list[str]:
code = cls.parse_code(block, text, lang)
pattern = r'\s*(.*=.*)?(\[.*\])'
match = re.search(pattern, code, re.DOTALL)
if match:
tasks_list_str = match.group(2)
tasks = ast.literal_eval(tasks_list_str)
else:
raise Exception
return tasks
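# Illustrative sketch (not part of the original file): how CodeParser splits an LLM reply into
# "##" sections and pulls out a fenced code block. The sample string is hypothetical.
def _example_codeparser_usage() -> str:
    sample = (
        "## Code Review\n"
        "1. Looks fine.\n"
        "## Rewrite Code: main.py\n"
        "```python\n"
        "print('hello')\n"
        "```\n"
    )
    # Returns "print('hello')\n", the body of the fenced block under "Rewrite Code".
    return CodeParser.parse_code(block="Rewrite Code", text=sample)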
class OutputParser:
@classmethod
def parse_blocks(cls, text: str):
        # First, split the text into blocks on "##"
blocks = text.split("##")
        # Build a dict that stores each block's title and content
block_dict = {}
        # Iterate over all blocks
for block in blocks:
            # Only process non-empty blocks
if block.strip() != "":
                # Separate the block title from its content and strip surrounding whitespace
block_title, block_content = block.split("\n", 1)
                # The LLM may emit a trailing colon; fix it here
if block_title[-1] == ":":
block_title = block_title[:-1]
block_dict[block_title.strip()] = block_content.strip()
return block_dict
@classmethod
def parse_code(cls, text: str, lang: str = "") -> str:
pattern = rf'```{lang}.*?\s+(.*?)```'
match = re.search(pattern, text, re.DOTALL)
if match:
code = match.group(1)
else:
raise Exception
return code
@classmethod
def parse_str(cls, text: str):
text = text.split("=")[-1]
text = text.strip().strip("'").strip("\"")
return text
@classmethod
def parse_file_list(cls, text: str) -> list[str]:
# Regular expression pattern to find the tasks list.
pattern = r'\s*(.*=.*)?(\[.*\])'
# Extract tasks list string using regex.
match = re.search(pattern, text, re.DOTALL)
if match:
tasks_list_str = match.group(2)
# Convert string representation of list to a Python list using ast.literal_eval.
tasks = ast.literal_eval(tasks_list_str)
else:
tasks = text.split("\n")
return tasks
def code_rewiew_single(filename, logic_analysis, shared_knowledge, execution_results, code):
PROMPT_TEMPLATE = """
NOTICE
Role: You are a professional software engineer, and your main task is to review the code. You need to ensure that the code conforms to the PEP8 standards, is elegantly designed and modularized, easy to read and maintain, and is written in Python 3.10 (or in another programming language).
ATTENTION: Use '##' to SPLIT SECTIONS, not '#'. Output format carefully referenced "Format example".
## Code Review: Based on the following context and code, and following the check list, Provide key, clear, concise, and specific code modification suggestions, up to 7.
```
1. Check 0: Is the code implemented as per the requirements?
2. Check 1: Are there any issues with the code logic?
3. Check 2: Does the existing code follow the "Data structures and interface definitions"?
4. Check 3: Is there a function in the code that is omitted or not fully implemented that needs to be implemented?
5. Check 4: Does the code have unnecessary or lack dependencies?
6. Check 5: Does the code match the rest of the code?
7. Check 6: How to fix the code if there is any error?
```
## Rewrite Code: {filename} Base on "Code Review" and the source code and the code Execution Results , rewrite code with triple quotes. Do your utmost to optimize THIS SINGLE FILE.
-----
## here is the logic analysis of the code:
{logic_analysis}
## here is the shared knowledge of the code:
{shared_knowledge}
## here is the code execution results:
filename:{filename}
{execution_results}
if there is any error, please check the code and modify it.
If any function or class is not defined, define it.
## the code that you have to review and rewrite:
filename:{filename}
{code}
----
## Format example
-----
{format_example}
-----
"""
FORMAT_EXAMPLE = """
## Code Review
1. The code ...
2. ...
3. ...
4. ...
5. ...
## Rewrite Code: {filename}
```python
...
```
"""
llm = ChatOpenAI(temperature =0.5, model_name = "gpt-3.5-turbo-16k")
prompt = PromptTemplate(template=PROMPT_TEMPLATE, input_variables=["filename", "logic_analysis", "shared_knowledge", "execution_results", "code", "format_example"])
chain = LLMChain(llm=llm, prompt=prompt)
raw_code_review = chain.run(filename = filename, logic_analysis = logic_analysis, shared_knowledge = shared_knowledge, execution_results = execution_results, code = code, format_example = FORMAT_EXAMPLE)
print(raw_code_review)
return raw_code_review
# Initialize session-state variables
if "code_history" not in st.session_state:
st.session_state["code_history"] = ""
if "main_task" not in st.session_state:
st.session_state["main_task"] = ""
if "data_api_design" not in st.session_state:
st.session_state["data_api_design"] = ""
if "file_list" not in st.session_state:
st.session_state["file_list"] = ""
if "main_mission" not in st.session_state:
st.session_state["main_mission"] = ""
with st.sidebar:
openai_api_key = st.text_input('OpenAI API Key', '', type="password")
os.environ['OPENAI_API_KEY'] = openai_api_key
st.title("Code Review")
st.info(f"最後相關程式碼會幫您整理到資料夾 workplace 中,請等我撰寫完成後至資料夾中查看相關程式檔。")
if st.button("下載相關套件"):
with st.spinner('Download...'):
        install_success = {}  # Records the install result for each package
for package in st.session_state["python_package_name"]:
result = subprocess.run(["pip", "install", package], capture_output=True, text=True)
if result.returncode == 0:
install_success[package] = "成功"
else:
install_success[package] = "失敗:" + result.stderr.strip()
with st.expander("安裝結果"):
for package, status in install_success.items():
st.write(f"安裝{package}: {status}")
if st.button("進行Code Review"):
with st.spinner('Code Reviewing...'):
folder_name = "workplace"
        # Check whether the folder already exists
if os.path.exists(folder_name):
            # If it exists, remove the folder and its contents
shutil.rmtree(folder_name)
os.mkdir(folder_name)
        # Run each generated program file and capture its output
folder_path = os.path.join(os.getcwd(), "first_version")
execution_results = {}
for file_name in reversed(st.session_state["file_list"]):
file_path = os.path.join(folder_path, file_name)
result = subprocess.run(['python', file_path], capture_output=True, text=True)
execution_results[file_name] = {
'returncode': result.returncode,
'stdout': result.stdout,
'stderr': result.stderr
}
        # Output the results for each snippet
for snippet in st.session_state["code_history"]:
code_review_object = ""
code_review_object += f"```python\n{snippet.code}\n```\n"
execution_result_str = ""
execution_result_str += f'returncode: {execution_results[snippet.title]["returncode"]}\n'
execution_result_str += f'stdout: {execution_results[snippet.title]["stdout"]}\n'
execution_result_str += f'stderr: {execution_results[snippet.title]["stderr"]}\n'
            # Run the code review
print('開始')
raw_code_review = code_rewiew_single(snippet.title, st.session_state["logic_analysis"], st.session_state["share_knowledge"], execution_result_str, code_review_object)
print('結束')
code_review_content = raw_code_review.split("##")[1]
            # Save the review to session state
st.session_state[f'{snippet.title}_Code_Review'] = code_review_content
pure_code_after_review = CodeParser.parse_code(block="Rewrite Code", text=raw_code_review)
with st.expander(f"## Code Review: {snippet.title}"):
st.write(code_review_content)
st.write("## Rewrite Code")
st.code(pure_code_after_review, language="python")
            # Update the stored code
snippet.code = pure_code_after_review
            # Write the code to a Python file
file_path_final = os.path.join(os.path.join(os.getcwd(), "workplace"), snippet.title)
with open(file_path_final, "w") as f:
f.write(snippet.code)
if st.button("查看歷史紀錄"):
st.write("相關程式碼最後會幫您整理到資料夾中,請至資料夾中查看。")
for snippet in st.session_state["code_history"]:
with st.expander(f"## {snippet.title}"):
#codereview
if f'{snippet.title}_Code_Review' not in st.session_state:
st.session_state[f'{snippet.title}_Code_Review'] = ""
st.write(st.session_state[f'{snippet.title}_Code_Review'])
st.code(snippet.code, language="python") | [
"logic_analysis",
"shared_knowledge",
"\n NOTICE\n Role: You are a professional software engineer, and your main task is to review the code. You need to ensure that the code conforms to the PEP8 standards, is elegantly designed and modularized, easy to read and maintain, and is written in Python 3.10 (or in another programming language).\n ATTENTION: Use '##' to SPLIT SECTIONS, not '#'. Output format carefully referenced \"Format example\".\n\n ## Code Review: Based on the following context and code, and following the check list, Provide key, clear, concise, and specific code modification suggestions, up to 7.\n ```\n 1. Check 0: Is the code implemented as per the requirements?\n 2. Check 1: Are there any issues with the code logic?\n 3. Check 2: Does the existing code follow the \"Data structures and interface definitions\"?\n 4. Check 3: Is there a function in the code that is omitted or not fully implemented that needs to be implemented?\n 5. Check 4: Does the code have unnecessary or lack dependencies?\n 6. Check 5: Does the code match the rest of the code?\n 7. Check 6: How to fix the code if there is any error?\n ```\n\n ## Rewrite Code: {filename} Base on \"Code Review\" and the source code and the code Execution Results , rewrite code with triple quotes. Do your utmost to optimize THIS SINGLE FILE. \n -----\n \n ## here is the logic analysis of the code:\n {logic_analysis}\n \n ## here is the shared knowledge of the code:\n {shared_knowledge}\n \n \n ## here is the code execution results:\n filename:{filename}\n \n {execution_results}\n \n if there is any error, please check the code and modify it.\n If any function or class is not defined, define it.\n \n ## the code that you have to review and rewrite: \n filename:{filename}\n\n {code}\n \n ----\n ## Format example\n -----\n {format_example}\n -----\n\n ",
"filename",
"format_example",
"execution_results"
] |
2024-01-10 | milk333445/Automatic_code_writing_assistant | pages~4_Contact.py | import streamlit as st
import pandas as pd
from langchain.chat_models import ChatOpenAI
from langchain import PromptTemplate, OpenAI, LLMChain
import time
import os
import re
from typing import List
from langchain.callbacks import get_openai_callback
from langchain.chat_models import ChatOpenAI
from langchain.prompts.chat import (
SystemMessagePromptTemplate,
HumanMessagePromptTemplate
)
from langchain.schema import (
AIMessage,
HumanMessage,
SystemMessage,
BaseMessage
)
class CAMELAgent:
def __init__(
self,
system_message: SystemMessage,
model: ChatOpenAI,
max_messages: int = 10000,
) -> None:
self.system_message = system_message
self.model = model
self.max_messages = max_messages
self.init_messages()
def reset(self) -> None:
self.init_messages()
return self.stored_messages
def init_messages(self) -> None:
self.stored_messages = [self.system_message]
def update_messages(self, message: BaseMessage) -> List[BaseMessage]:
self.stored_messages.append(message)
while len(self.stored_messages) > self.max_messages:
# Remove the oldest HumanMessage or AIMessage
self.stored_messages.pop(1)
return self.stored_messages
def step(
self,
input_message: HumanMessage,
) -> AIMessage:
messages = self.update_messages(input_message)
output_message = self.model(messages)
self.update_messages(output_message)
return output_message
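# Minimal usage sketch (not part of the original file; assumes OPENAI_API_KEY is already set and
# reuses the model name this file already uses elsewhere).
def _example_camel_agent() -> str:
    sys_msg = SystemMessage(content="You are an engineer who has just finished a project.")
    agent = CAMELAgent(sys_msg, ChatOpenAI(temperature=0.2, model_name="gpt-3.5-turbo-16k"))
    reply = agent.step(HumanMessage(content="Briefly describe the main goal of the project."))
    return reply.content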
def get_sys_msgs(assistant_role_name: str, assistant_inception_prompt, main_task, file_list_str, code_history_str, data_api_design):
assistant_sys_template = SystemMessagePromptTemplate.from_template(template=assistant_inception_prompt)
assistant_sys_msg = assistant_sys_template.format_messages(assistant_role_name=assistant_role_name,
main_task = main_task,
file_list_str = file_list_str,
code_history_str = code_history_str,
data_api_design = data_api_design)[0]
return assistant_sys_msg
def show_code(code_history):
if not code_history:
return ""
code_history_str = ""
for snippet in code_history:
code_history_str += f"## {snippet.title}\n"
code_history_str += f"```python\n{snippet.code}\n```\n"
return code_history_str
st.title("Code Assistant")
# Initialize the chat history
if "messages" not in st.session_state:
st.session_state.messages = []
# Re-render previous messages after a page rerun
for message in st.session_state.messages:
with st.chat_message(message['role']):
st.markdown(message['content'])
with st.sidebar:
openai_api_key = st.text_input('OpenAI API Key', '', type="password")
st.info("如果您有任何與任務相關問題,請在下方輸入,我會盡力回答您。")
os.environ['OPENAI_API_KEY'] = openai_api_key
try:
code_history = st.session_state["code_history"]
main_task = st.session_state["main_task"]
data_api_design = st.session_state["data_api_design"]
python_package_name = st.session_state["python_package_name"]
seq_flow = st.session_state["seq_flow"]
file_list = st.session_state["file_list"]
file_list_str = "".join(f"{index + 1}.{filename}\n" for index, filename in enumerate(file_list))
code_history_str = show_code(code_history)
with st.spinner('正在初始化聊天機器人...'):
        # Create the assistant agent
assistant_inception_prompt = (
"""
永遠記住你是一個剛完成一個專案的工程師{assistant_role_name},我現在準備跟你聊天
我會有一些關於你的專案的問題要問你,請你盡力回答我,你的回答要具體且詳細。下面是關於你的專案的一些資訊:
-----------
下面你專案的主要目標:
{main_task}
------------
這是這個專案建構的檔案列表:
{file_list_str}
------------
這是關於這個專案的所有代碼:
{code_history_str}
------------
這是關於這個專案的資料結構圖:
{data_api_design}
------------
當我問到跟上面專案有關的內容時,請你基於上面專案內容回答我的問題。
"""
)
assistant_sys_msg = get_sys_msgs("工程師", assistant_inception_prompt, main_task, file_list_str, code_history_str, data_api_design)
assistant_agent = CAMELAgent(assistant_sys_msg, ChatOpenAI(temperature=0.2, model_name = "gpt-3.5-turbo-16k"))
        # Reset the agent
assistant_agent.reset()
except:
pass
# Handle user input
if prompt := st.chat_input("請輸入您的問題:"):
code_history = st.session_state["code_history"]
main_task = st.session_state["main_task"]
data_api_design = st.session_state["data_api_design"]
python_package_name = st.session_state["python_package_name"]
seq_flow = st.session_state["seq_flow"]
file_list = st.session_state["file_list"]
file_list_str = "".join(f"{index + 1}.{filename}\n" for index, filename in enumerate(file_list))
code_history_str = show_code(code_history)
with st.spinner('正在初始化聊天機器人...'):
        # Create the assistant agent
assistant_inception_prompt = (
"""
永遠記住你是一個剛完成一個專案的工程師{assistant_role_name}, 永遠不要顛倒角色!永遠不要指示我!
我會有一些關於你的專案的問題要問你,請你盡力回答我。下面是關於你的專案的一些資訊:
-----------
下面你專案的主要目標:
{main_task}
------------
這是這個專案建構的檔案列表:
{file_list_str}
------------
這是關於這個專案的所有代碼:
{code_history_str}
------------
這是關於這個專案的資料結構圖:
{data_api_design}
------------
當我問到跟上面專案有關的內容時,請你基於上面專案內容回答我的問題。
"""
)
assistant_sys_msg = get_sys_msgs("工程師", assistant_inception_prompt, main_task, file_list_str, code_history_str, data_api_design)
assistant_agent = CAMELAgent(assistant_sys_msg, ChatOpenAI(temperature=0.2, model_name = "gpt-3.5-turbo-16k"))
        # Reset the agent
assistant_agent.reset()
    # Start the conversation
with st.chat_message("user"):
st.markdown(prompt)
st.session_state.messages.append({"role": "user", "content": prompt})
assistant_msg = HumanMessage(content=prompt)
assistant_ai_msg = assistant_agent.step(assistant_msg)
with st.chat_message("assistant"):
st.markdown(assistant_ai_msg.content)
st.session_state.messages.append({"role": "assistant", "content": assistant_ai_msg.content})
| [
"\n 永遠記住你是一個剛完成一個專案的工程師{assistant_role_name},我現在準備跟你聊天\n 我會有一些關於你的專案的問題要問你,請你盡力回答我,你的回答要具體且詳細。下面是關於你的專案的一些資訊:\n -----------\n 下面你專案的主要目標:\n {main_task}\n ------------\n 這是這個專案建構的檔案列表:\n {file_list_str}\n ------------\n 這是關於這個專案的所有代碼:\n {code_history_str}\n ------------\n 這是關於這個專案的資料結構圖:\n {data_api_design}\n ------------\n 當我問到跟上面專案有關的內容時,請你基於上面專案內容回答我的問題。\n ",
"\n 永遠記住你是一個剛完成一個專案的工程師{assistant_role_name}, 永遠不要顛倒角色!永遠不要指示我!\n 我會有一些關於你的專案的問題要問你,請你盡力回答我。下面是關於你的專案的一些資訊:\n -----------\n 下面你專案的主要目標:\n {main_task}\n ------------\n 這是這個專案建構的檔案列表:\n {file_list_str}\n ------------\n 這是關於這個專案的所有代碼:\n {code_history_str}\n ------------\n 這是關於這個專案的資料結構圖:\n {data_api_design}\n ------------\n 當我問到跟上面專案有關的內容時,請你基於上面專案內容回答我的問題。\n "
] |
2024-01-10 | milk333445/Automatic_code_writing_assistant | pages~2_Projects.py | import streamlit as st
from langchain import OpenAI
from langchain import PromptTemplate
from langchain.chains.summarize import load_summarize_chain
from langchain.text_splitter import RecursiveCharacterTextSplitter
from serpapi import GoogleSearch
from langchain.document_loaders import WebBaseLoader
import re
from langchain.vectorstores import Pinecone
from langchain.embeddings.openai import OpenAIEmbeddings
import pinecone
from langchain.schema import Document
from langchain.chat_models import ChatOpenAI
from langchain.vectorstores import FAISS
import numpy as np
from langchain import PromptTemplate, OpenAI, LLMChain
import os
import shutil
with st.sidebar:
openai_api_key = st.text_input('OpenAI API Key', '', type="password")
os.environ['OPENAI_API_KEY'] = openai_api_key
# Helper functions
import ast
#parser
class CodeParser:
@classmethod
def parse_block(cls, block: str, text: str) -> str:
blocks = cls.parse_blocks(text)
for k, v in blocks.items():
if block in k:
return v
return ""
@classmethod
def parse_blocks(cls, text: str):
        # Split the text into blocks
blocks = text.split('##')
        # Build a dict that stores each block's title and content
block_dict = {}
for block in blocks:
            # Only process non-empty blocks
if block.strip() != "":
block_title, block_content = block.split('\n', 1)
block_dict[block_title.strip()] = block_content.strip()
return block_dict
@classmethod
def parse_code(cls, block: str, text: str, lang: str = "") -> str:
if block:
text = cls.parse_block(block, text)
pattern = rf'```{lang}.*?\s+(.*?)```'
match = re.search(pattern, text, re.DOTALL)
if match:
code = match.group(1)
else:
raise Exception(f"Error parsing code block: {block}")
return code
@classmethod
def parse_str(cls, block: str, text: str, lang: str = ""):
code = cls.parse_code(block, text, lang)
code = code.split("=")[-1]
code = code.strip().strip("'").strip("\"")
return code
@classmethod
def parse_file_list(cls, block: str, text: str, lang: str = "")->list[str]:
code = cls.parse_code(block, text, lang)
pattern = r'\s*(.*=.*)?(\[.*\])'
match = re.search(pattern, code, re.DOTALL)
if match:
tasks_list_str = match.group(2)
tasks = ast.literal_eval(tasks_list_str)
else:
raise Exception
return tasks
class OutputParser:
@classmethod
def parse_blocks(cls, text: str):
        # First, split the text into blocks on "##"
blocks = text.split("##")
        # Build a dict that stores each block's title and content
block_dict = {}
        # Iterate over all blocks
for block in blocks:
            # Only process non-empty blocks
if block.strip() != "":
                # Separate the block title from its content and strip surrounding whitespace
block_title, block_content = block.split("\n", 1)
                # The LLM may emit a trailing colon; fix it here
if block_title[-1] == ":":
block_title = block_title[:-1]
block_dict[block_title.strip()] = block_content.strip()
return block_dict
@classmethod
def parse_code(cls, text: str, lang: str = "") -> str:
pattern = rf'```{lang}.*?\s+(.*?)```'
match = re.search(pattern, text, re.DOTALL)
if match:
code = match.group(1)
else:
raise Exception
return code
@classmethod
def parse_str(cls, text: str):
text = text.split("=")[-1]
text = text.strip().strip("'").strip("\"")
return text
@classmethod
def parse_file_list(cls, text: str) -> list[str]:
# Regular expression pattern to find the tasks list.
pattern = r'\s*(.*=.*)?(\[.*\])'
# Extract tasks list string using regex.
match = re.search(pattern, text, re.DOTALL)
if match:
tasks_list_str = match.group(2)
# Convert string representation of list to a Python list using ast.literal_eval.
tasks = ast.literal_eval(tasks_list_str)
else:
tasks = text.split("\n")
return tasks
# Code generation
def code_writing(filename, context, past_code, data_api_design):
llm = ChatOpenAI(temperature =0.5, model_name = "gpt-3.5-turbo-16k")
PROMPT_TEMPLATE = """
NOTICE
Role: You are a professional engineer; the main goal is to write PEP8 compliant, elegant, modular, easy to read and maintain Python 3.9 code (but you can also use other programming language)
ATTENTION: Use '##' to SPLIT SECTIONS, not '#'. Output format carefully referenced "Format example".
## Code: {filename} Write code with triple quoto, based on the following list and context.
1. Do your best to implement THIS ONLY ONE FILE. ONLY USE EXISTING API. IF NO API, IMPLEMENT IT.
2. Requirement: Based on the context, implement one following code file, note to return only in code form, your code will be part of the entire project, so please implement complete, reliable, reusable code snippets
3. Attention1: If there is any setting, ALWAYS SET A DEFAULT VALUE, ALWAYS USE STRONG TYPE AND EXPLICIT VARIABLE.
4. Attention2: YOU MUST FOLLOW "Data structures and interface definitions". DONT CHANGE ANY DESIGN.
5. Think before writing: What should be implemented and provided in this document?
6. CAREFULLY CHECK THAT YOU DONT MISS ANY NECESSARY CLASS/FUNCTION IN THIS FILE.
7. Do not use public member functions that do not exist in your design.
-----
# Context
{context}
-----
## Data structures and interface definitions
{data_api_design}
## the past code that we have created
```python
{past_code}
```
## Format example
-----
## Code: {filename}
```python
## {filename}
...
```
-----
"""
prompt = PromptTemplate(template=PROMPT_TEMPLATE, input_variables=["filename", "context", "data_api_design", "past_code"])
chain = LLMChain(llm=llm, prompt=prompt)
raw_code = chain.run(filename = filename, context = context, data_api_design = data_api_design, past_code = past_code)
return raw_code
# Loop over the file list and generate code for each file
class CodeSnippet:
def __init__(self, title, code):
self.title = title
self.code = code
def show_code(code_history):
if not code_history:
return ""
code_history_str = ""
for snippet in code_history:
code_history_str += f"## {snippet.title}\n"
code_history_str += f"```python\n{snippet.code}\n```\n"
return code_history_str
def file_list_code_writing(file_list, content, data_api_design):
file_list_str = "".join(f"{index + 1}.{filename}\n" for index, filename in enumerate(file_list))
code_history = []
folder_name = "first_version"
    # Check whether the folder already exists
if os.path.exists(folder_name):
        # If it exists, remove the folder and its contents
shutil.rmtree(folder_name)
    # Create a fresh folder
os.mkdir(folder_name)
for i in range(len(file_list)):
code_history_str = show_code(code_history)
raw_code = code_writing(file_list[i], content, code_history_str, data_api_design)
code = OutputParser.parse_code(raw_code, "python")
with st.expander(f"{file_list[i]} 已完成"):
st.code(code)
        # Write the generated code to a file
with open(f"first_version/{file_list[i]}", "w") as f:
f.write(code)
st.session_state[file_list[i]] = code
code_main = CodeSnippet(file_list[i], code)
code_history.append(code_main)
return code_history
# Task breakdown
def write_task(content):
llm = ChatOpenAI(temperature =0.5, model_name = "gpt-3.5-turbo-16k")
    # Prompt template
prompt_template = '''
# Context
{context}
## Format example
{format_example}
-----
Role: You are a project manager; the goal is to break down tasks according to the content above, give a task list, and analyze task dependencies to start with the prerequisite modules
Requirements: Based on the context, fill in the following missing information, note that all sections are returned in Python code triple quote form seperatedly. Here the granularity of the task is a file, if there are any missing files, you can supplement them
Attention: Use '##' to split sections, not '#', and '## <SECTION_NAME>' SHOULD WRITE BEFORE the code and triple quote.
## Required Python third-party packages: Provided in requirements.txt format
## Required Other language third-party packages: Provided in requirements.txt format
## Full API spec: Use OpenAPI 3.0. Describe all APIs that may be used by both frontend and backend.
## Logic Analysis: Provided as a Python list[str, str]. the first is filename, the second is class/method/function should be implemented in this file. Analyze the dependencies between the files, which work should be done first
## Task list: Provided as Python list[str]. Each str is a filename, the more at the beginning, the more it is a prerequisite dependency, should be done first
## Shared Knowledge: Anything that should be public like utils' functions, config's variables details that should make clear first.
## Anything UNCLEAR: Provide as Plain text. Make clear here. For example, don't forget a main entry. don't forget to init 3rd party libs.
'''
FORMAT_EXAMPLE = '''
---
## Required Python third-party packages
```python
"""
flask==1.1.2
bcrypt==3.2.0
"""
```
## Required Other language third-party packages
```python
"""
No third-party ...
"""
```
## Full API spec
```python
"""
openapi: 3.0.0
...
description: A JSON object ...
"""
```
## Logic Analysis
```python
[
("game.py", "Contains ..."),
]
```
## Task list
```python
[
"game.py",
]
```
## Shared Knowledge
```python
"""
'game.py' contains ...
"""
```
## Anything UNCLEAR
We need ... how to start.
---
'''
prompt = PromptTemplate(template=prompt_template, input_variables=["context", "format_example"])
chain = LLMChain(llm=llm, prompt=prompt)
main_task = chain.run(context=content, format_example=FORMAT_EXAMPLE)
return main_task
st.title("Code Generator")
st.info(f"最後相關程式碼會幫您整理到資料夾 workplace 中,請等我撰寫完成後至資料夾中查看相關程式檔。")
if st.button("產生具體任務"):
with st.spinner('Generating task...'):
main_mission = write_task(st.session_state["raw_content"])
logic_analysis = CodeParser.parse_file_list(block = "Logic Analysis", text = main_mission)
st.session_state["logic_analysis"] = logic_analysis
share_knowledge = CodeParser.parse_code(block = "Shared Knowledge", text = main_mission)
st.session_state["share_knowledge"] = share_knowledge
st.session_state["main_mission"] = main_mission
with st.expander("任務"):
st.write(st.session_state["main_mission"])
if st.button("🤖產生程式碼"):
with st.spinner('Generating code...'):
main_task = st.session_state["main_task"]
data_api_design = st.session_state["data_api_design"]
python_package_name = st.session_state["python_package_name"]
seq_flow = st.session_state["seq_flow"]
file_list = CodeParser.parse_file_list("Task list", st.session_state["main_mission"])
st.session_state["file_list"] = file_list
raw_content = st.session_state["raw_content"]
        # Generate the code for each file
content = f"logic_analysis = {st.session_state['logic_analysis']}\nshare_knowledge = {st.session_state['share_knowledge']}"
code_history = file_list_code_writing(file_list, content, data_api_design)
st.session_state["code_history"] = code_history
if st.button("查看歷史紀錄"):
st.write("相關程式碼最後會幫您整理到資料夾中,請至資料夾中查看。")
with st.expander("具體任務"):
st.write(st.session_state["main_mission"])
for i in range(len(st.session_state["file_list"])):
with st.expander(f"{st.session_state['file_list'][i]}"):
st.code(st.session_state[st.session_state["file_list"][i]]) | [
"\n NOTICE\n Role: You are a professional engineer; the main goal is to write PEP8 compliant, elegant, modular, easy to read and maintain Python 3.9 code (but you can also use other programming language)\n ATTENTION: Use '##' to SPLIT SECTIONS, not '#'. Output format carefully referenced \"Format example\".\n\n ## Code: {filename} Write code with triple quoto, based on the following list and context.\n 1. Do your best to implement THIS ONLY ONE FILE. ONLY USE EXISTING API. IF NO API, IMPLEMENT IT.\n 2. Requirement: Based on the context, implement one following code file, note to return only in code form, your code will be part of the entire project, so please implement complete, reliable, reusable code snippets\n 3. Attention1: If there is any setting, ALWAYS SET A DEFAULT VALUE, ALWAYS USE STRONG TYPE AND EXPLICIT VARIABLE.\n 4. Attention2: YOU MUST FOLLOW \"Data structures and interface definitions\". DONT CHANGE ANY DESIGN.\n 5. Think before writing: What should be implemented and provided in this document?\n 6. CAREFULLY CHECK THAT YOU DONT MISS ANY NECESSARY CLASS/FUNCTION IN THIS FILE.\n 7. Do not use public member functions that do not exist in your design.\n\n -----\n # Context\n {context}\n -----\n ## Data structures and interface definitions\n {data_api_design}\n \n ## the past code that we have created\n ```python\n {past_code}\n ```\n ## Format example\n -----\n ## Code: {filename}\n ```python\n ## {filename}\n ...\n ```\n -----\n ",
"filename",
"\n # Context\n {context}\n\n ## Format example\n {format_example}\n -----\n Role: You are a project manager; the goal is to break down tasks according to the content above, give a task list, and analyze task dependencies to start with the prerequisite modules\n Requirements: Based on the context, fill in the following missing information, note that all sections are returned in Python code triple quote form seperatedly. Here the granularity of the task is a file, if there are any missing files, you can supplement them\n Attention: Use '##' to split sections, not '#', and '## <SECTION_NAME>' SHOULD WRITE BEFORE the code and triple quote.\n\n ## Required Python third-party packages: Provided in requirements.txt format\n\n ## Required Other language third-party packages: Provided in requirements.txt format\n\n ## Full API spec: Use OpenAPI 3.0. Describe all APIs that may be used by both frontend and backend.\n\n ## Logic Analysis: Provided as a Python list[str, str]. the first is filename, the second is class/method/function should be implemented in this file. Analyze the dependencies between the files, which work should be done first\n\n ## Task list: Provided as Python list[str]. Each str is a filename, the more at the beginning, the more it is a prerequisite dependency, should be done first\n\n ## Shared Knowledge: Anything that should be public like utils' functions, config's variables details that should make clear first. \n\n ## Anything UNCLEAR: Provide as Plain text. Make clear here. For example, don't forget a main entry. don't forget to init 3rd party libs.\n\n ",
"context",
"format_example",
"data_api_design"
] |
2024-01-10 | milk333445/Automatic_code_writing_assistant | homepage.py | import streamlit as st
st.set_page_config(
page_title="Automatic code writing robot",
page_icon="🧊"
)
import pandas as pd
from langchain.chat_models import ChatOpenAI
from langchain import PromptTemplate, OpenAI, LLMChain
import time
import os
import ast
import re
from time import sleep
from PIL import Image
import subprocess
from pathlib import Path
import json, logging, pickle, sys, shutil, copy
class CodeParser:
@classmethod
def parse_block(cls, block: str, text: str) -> str:
blocks = cls.parse_blocks(text)
for k, v in blocks.items():
if block in k:
return v
return ""
@classmethod
def parse_blocks(cls, text: str):
        # Split the text into blocks
blocks = text.split('##')
        # Build a dict that stores each block's title and content
block_dict = {}
for block in blocks:
            # Only process non-empty blocks
if block.strip() != "":
block_title, block_content = block.split('\n', 1)
block_dict[block_title.strip()] = block_content.strip()
return block_dict
@classmethod
def parse_code(cls, block: str, text: str, lang: str = "") -> str:
if block:
text = cls.parse_block(block, text)
pattern = rf'```{lang}.*?\s+(.*?)```'
match = re.search(pattern, text, re.DOTALL)
if match:
code = match.group(1)
else:
raise Exception(f"Error parsing code block: {block}")
return code
@classmethod
def parse_str(cls, block: str, text: str, lang: str = ""):
code = cls.parse_code(block, text, lang)
code = code.split("=")[-1]
code = code.strip().strip("'").strip("\"")
return code
@classmethod
def parse_file_list(cls, block: str, text: str, lang: str = "")->list[str]:
code = cls.parse_code(block, text, lang)
pattern = r'\s*(.*=.*)?(\[.*\])'
match = re.search(pattern, code, re.DOTALL)
if match:
tasks_list_str = match.group(2)
tasks = ast.literal_eval(tasks_list_str)
else:
raise Exception
return tasks
# Helper functions
def generate_task(task):
llm = ChatOpenAI(temperature =0.5, model_name = "gpt-3.5-turbo-16k")
prompt_template = """
# Context
{context}
## Format example
{format_example}
-----
Role: You are an architect; the goal is to design a SOTA PEP8-compliant python system; make the best use of good open source tools
Requirement: Fill in the following missing information based on the context, note that all sections are response with code form separately
Max Output: 8192 chars or 2048 tokens. Try to use them up.
Attention: Use '##' to split sections, not '#', and '## <SECTION_NAME>' SHOULD WRITE BEFORE the code and triple quote.
## Implementation approach: Provide as Plain text. Analyze the difficult points of the requirements, select the appropriate open-source framework.
## Python package name: Please provide the necessary Python packages in the form of a Python list[str], using triple quotes for Python, and keep it concise and clear.
## File list: Provided as Python list[str], the list of ONLY REQUIRED files needed to write the program(LESS IS MORE!). Only need relative paths, comply with PEP8 standards. ALWAYS write a main.py or app.py here
## Data structures and interface definitions: Use mermaid classDiagram code syntax, including classes (INCLUDING __init__ method) and functions (with type annotations), CLEARLY MARK the RELATIONSHIPS between classes, and comply with PEP8 standards. The data structures SHOULD BE VERY DETAILED and the API should be comprehensive with a complete design.
## Program call flow: Use sequenceDiagram code syntax, COMPLETE and VERY DETAILED, using CLASSES AND API DEFINED ABOVE accurately, covering the CRUD AND INIT of each object, SYNTAX MUST BE CORRECT.
## Anything UNCLEAR: Provide as Plain text. Make clear here.
"""
FORMAT_EXAMPLE ="""
---
## Implementation approach
We will ...
## Python package name
```python
[
"numpy",
]
```
## File list
```python
[
"main.py",
]
```
## Data structures and interface definitions
```mermaid
classDiagram
class Game{
+int score
}
...
Game "1" -- "1" Food: has
```
## Program call flow
```mermaid
sequenceDiagram
participant M as Main
...
G->>M: end game
```
## Anything UNCLEAR
The requirement is clear to me.
---
"""
prompt = PromptTemplate(template=prompt_template, input_variables=["context", "format_example"])
chain = LLMChain(llm=llm, prompt=prompt)
raw_content = chain.run(context = task, format_example = FORMAT_EXAMPLE)
data_api_design = CodeParser.parse_code(block="Data structures and interface definitions", text=raw_content)
python_package_name = CodeParser.parse_file_list(block="Python package name", text=raw_content)
seq_flow = CodeParser.parse_code(block="Program call flow", text=raw_content)
file_list = CodeParser.parse_file_list(block="File list", text=raw_content)
main_task = raw_content.split("##")[1]
unclear = raw_content.split("##")[-1]
return main_task, data_api_design, python_package_name, seq_flow, file_list, raw_content, unclear
def mermaid_to_png(mermaid_code, output_file, width=2048, height=2048):
# Write the Mermaid code to a temporary file
tmp = Path(f'{output_file}.mmd')
tmp.write_text(mermaid_code, encoding='utf-8')
output_file = f'{output_file}.png'
    # Call the mmdc CLI to convert the Mermaid code to a PNG
    mmdc_path = shutil.which('mmdc') or shutil.which('mmdc.cmd')
    subprocess.run([mmdc_path, '-i', str(tmp), '-o', output_file, '-w', str(width), '-H', str(height)])
# App layout: sidebar
with st.sidebar:
st.write("## 請輸入以下資料:")
openai_api_key = st.text_input('OpenAI API Key', '', type="password")
if "onenai_api_key" not in st.session_state:
st.session_state["openai_api_key"] = ""
os.environ['OPENAI_API_KEY'] = openai_api_key
    # Initialize session-state variables
if "main_task" not in st.session_state:
st.session_state["main_task"] = ""
if "data_api_design" not in st.session_state:
st.session_state["data_api_design"] = ""
if "python_package_name" not in st.session_state:
st.session_state["python_package_name"] = ""
if "seq_flow" not in st.session_state:
st.session_state["seq_flow"] = ""
if "file_list" not in st.session_state:
st.session_state["file_list"] = ""
if "raw_content" not in st.session_state:
st.session_state["raw_content"] = ""
if "unclear" not in st.session_state:
st.session_state["unclear"] = ""
if "task" not in st.session_state:
st.session_state["task"] = ""
if "detailed_goal" not in st.session_state:
st.session_state["detailed_goal"] = ""
st.session_state['openai_api_key'] = os.environ['OPENAI_API_KEY']
# App layout: main page
st.title("🤖Automated Task Completion")
task = st.text_input("您想要解決的任務是什麼?")
with st.expander("更詳細的目標"):
detailed_goal = st.text_area("請在此輸入更詳細的內容:")
if st.button("🤖開始分析"):
if task:
st.session_state["task"] = task
st.session_state["detailed_goal"] = detailed_goal
if not openai_api_key.startswith('sk-'):
st.warning('Please enter your OpenAI API key in the sidebar')
else:
with st.spinner('Generating...'):
main_task, data_api_design, python_package_name, seq_flow, file_list, raw_content, unclear = generate_task(task + "\n" + "關於此任務的一些補充敘述: " + detailed_goal)
st.session_state["main_task"] = main_task
st.session_state["data_api_design"] = data_api_design
st.session_state["python_package_name"] = python_package_name
st.session_state["seq_flow"] = seq_flow
st.session_state["file_list"] = file_list
st.session_state["raw_content"] = raw_content
st.session_state["unclear"] = unclear
st.write("## 👇🏻我的任務內容如下:")
st.info(main_task)
#stmd.st_mermaid(data_api_design)
st.write("## 👇🏻我的資料結構設計如下:")
mermaid_to_png(data_api_design, "data_api_design")
image = Image.open('data_api_design.png')
st.image(image, caption='Data structures and interface definitions')
st.write("## 👇🏻文件列表:")
st.write(file_list)
st.write("## 👇🏻不清楚的地方:")
st.info(unclear)
else:
st.warning("請輸入任務要求")
with st.expander("查看歷史紀錄"):
st.write("## 👇🏻原始問題為:")
st.info(st.session_state["task"])
st.write("## 👇🏻我的任務內容如下:")
st.info(st.session_state["main_task"])
st.write("## 👇🏻必要安裝套件如下:")
st.write(st.session_state["python_package_name"])
#stmd.st_mermaid(data_api_design)
st.write("## 👇🏻我的資料結構設計如下:")
try:
image = Image.open('data_api_design.png')
st.image(image, caption='Data structures and interface definitions')
    except Exception:
st.write("尚未生成圖片")
st.write("## 👇🏻文件列表:")
st.write(st.session_state["file_list"])
st.write("## 👇🏻不清楚的地方:")
st.info(st.session_state["unclear"])
| [
"context",
"\n # Context\n {context}\n\n ## Format example\n {format_example}\n -----\n Role: You are an architect; the goal is to design a SOTA PEP8-compliant python system; make the best use of good open source tools\n Requirement: Fill in the following missing information based on the context, note that all sections are response with code form separately\n Max Output: 8192 chars or 2048 tokens. Try to use them up.\n Attention: Use '##' to split sections, not '#', and '## <SECTION_NAME>' SHOULD WRITE BEFORE the code and triple quote.\n\n ## Implementation approach: Provide as Plain text. Analyze the difficult points of the requirements, select the appropriate open-source framework.\n\n ## Python package name: Please provide the necessary Python packages in the form of a Python list[str], using triple quotes for Python, and keep it concise and clear.\n\n ## File list: Provided as Python list[str], the list of ONLY REQUIRED files needed to write the program(LESS IS MORE!). Only need relative paths, comply with PEP8 standards. ALWAYS write a main.py or app.py here\n\n ## Data structures and interface definitions: Use mermaid classDiagram code syntax, including classes (INCLUDING __init__ method) and functions (with type annotations), CLEARLY MARK the RELATIONSHIPS between classes, and comply with PEP8 standards. The data structures SHOULD BE VERY DETAILED and the API should be comprehensive with a complete design. \n\n ## Program call flow: Use sequenceDiagram code syntax, COMPLETE and VERY DETAILED, using CLASSES AND API DEFINED ABOVE accurately, covering the CRUD AND INIT of each object, SYNTAX MUST BE CORRECT.\n\n ## Anything UNCLEAR: Provide as Plain text. Make clear here.\n\n ",
"format_example"
] |
2024-01-10 | cyber-xcode/openai-python | openai~api_resources~abstract~engine_api_resource.py | import time
from pydoc import apropos
from typing import Optional
from urllib.parse import quote_plus
import openai
from openai import api_requestor, error, util
from openai.api_resources.abstract.api_resource import APIResource
from openai.openai_response import OpenAIResponse
from openai.util import ApiType
MAX_TIMEOUT = 20
class EngineAPIResource(APIResource):
plain_old_data = False
def __init__(self, engine: Optional[str] = None, **kwargs):
super().__init__(engine=engine, **kwargs)
@classmethod
def class_url(
cls,
engine: Optional[str] = None,
api_type: Optional[str] = None,
api_version: Optional[str] = None,
):
# Namespaces are separated in object names with periods (.) and in URLs
# with forward slashes (/), so replace the former with the latter.
base = cls.OBJECT_NAME.replace(".", "/") # type: ignore
typed_api_type, api_version = cls._get_api_type_and_version(
api_type, api_version
)
if typed_api_type in (ApiType.AZURE, ApiType.AZURE_AD):
if not api_version:
raise error.InvalidRequestError(
"An API version is required for the Azure API type."
)
if engine is None:
raise error.InvalidRequestError(
"You must provide the deployment name in the 'engine' parameter to access the Azure OpenAI service"
)
extn = quote_plus(engine)
return "/%s/%s/%s/%s?api-version=%s" % (
cls.azure_api_prefix,
cls.azure_deployments_prefix,
extn,
base,
api_version,
)
elif typed_api_type == ApiType.OPEN_AI:
if engine is None:
return "/%s" % (base)
extn = quote_plus(engine)
return "/engines/%s/%s" % (extn, base)
else:
raise error.InvalidAPIType("Unsupported API type %s" % api_type)
@classmethod
def __prepare_create_request(
cls,
api_key=None,
api_base=None,
api_type=None,
api_version=None,
organization=None,
**params,
):
deployment_id = params.pop("deployment_id", None)
if deployment_id is None:
import os
deployment_id = os.environ.get("OPENAI_API_ENGINE", None)
engine = params.pop("engine", deployment_id)
model = params.get("model", None)
timeout = params.pop("timeout", None)
stream = params.get("stream", False)
headers = params.pop("headers", None)
request_timeout = params.pop("request_timeout", None)
typed_api_type = cls._get_api_type_and_version(api_type=api_type)[0]
if typed_api_type in (util.ApiType.AZURE, util.ApiType.AZURE_AD):
if deployment_id is None and engine is None:
raise error.InvalidRequestError(
"Must provide an 'engine' or 'deployment_id' parameter to create a %s"
% cls,
"engine",
)
else:
if model is None and engine is None:
raise error.InvalidRequestError(
"Must provide an 'engine' or 'model' parameter to create a %s"
% cls,
"engine",
)
if timeout is None:
# No special timeout handling
pass
elif timeout > 0:
# API only supports timeouts up to MAX_TIMEOUT
params["timeout"] = min(timeout, MAX_TIMEOUT)
timeout = (timeout - params["timeout"]) or None
elif timeout == 0:
params["timeout"] = MAX_TIMEOUT
requestor = api_requestor.APIRequestor(
api_key,
api_base=api_base,
api_type=api_type,
api_version=api_version,
organization=organization,
)
url = cls.class_url(engine, api_type, api_version)
return (
deployment_id,
engine,
timeout,
stream,
headers,
request_timeout,
typed_api_type,
requestor,
url,
params,
)
@classmethod
def create(
cls,
api_key=None,
api_base=None,
api_type=None,
request_id=None,
api_version=None,
organization=None,
**params,
):
(
deployment_id,
engine,
timeout,
stream,
headers,
request_timeout,
typed_api_type,
requestor,
url,
params,
) = cls.__prepare_create_request(
api_key, api_base, api_type, api_version, organization, **params
)
response, _, api_key = requestor.request(
"post",
url,
params=params,
headers=headers,
stream=stream,
request_id=request_id,
request_timeout=request_timeout,
)
if stream:
# must be an iterator
assert not isinstance(response, OpenAIResponse)
return (
util.convert_to_openai_object(
line,
api_key,
api_version,
organization,
engine=engine,
plain_old_data=cls.plain_old_data,
)
for line in response
)
else:
obj = util.convert_to_openai_object(
response,
api_key,
api_version,
organization,
engine=engine,
plain_old_data=cls.plain_old_data,
)
if timeout is not None:
obj.wait(timeout=timeout or None)
return obj
@classmethod
async def acreate(
cls,
api_key=None,
api_base=None,
api_type=None,
request_id=None,
api_version=None,
organization=None,
**params,
):
(
deployment_id,
engine,
timeout,
stream,
headers,
request_timeout,
typed_api_type,
requestor,
url,
params,
) = cls.__prepare_create_request(
api_key, api_base, api_type, api_version, organization, **params
)
response, _, api_key = await requestor.arequest(
"post",
url,
params=params,
headers=headers,
stream=stream,
request_id=request_id,
request_timeout=request_timeout,
)
if stream:
# must be an iterator
assert not isinstance(response, OpenAIResponse)
return (
util.convert_to_openai_object(
line,
api_key,
api_version,
organization,
engine=engine,
plain_old_data=cls.plain_old_data,
)
async for line in response
)
else:
obj = util.convert_to_openai_object(
response,
api_key,
api_version,
organization,
engine=engine,
plain_old_data=cls.plain_old_data,
)
if timeout is not None:
await obj.await_(timeout=timeout or None)
return obj
def instance_url(self):
id = self.get("id")
if not isinstance(id, str):
raise error.InvalidRequestError(
f"Could not determine which URL to request: {type(self).__name__} instance has invalid ID: {id}, {type(id)}. ID should be of type str.",
"id",
)
extn = quote_plus(id)
params_connector = "?"
if self.typed_api_type in (ApiType.AZURE, ApiType.AZURE_AD):
api_version = self.api_version or openai.api_version
if not api_version:
raise error.InvalidRequestError(
"An API version is required for the Azure API type."
)
base = self.OBJECT_NAME.replace(".", "/")
url = "/%s/%s/%s/%s/%s?api-version=%s" % (
self.azure_api_prefix,
self.azure_deployments_prefix,
self.engine,
base,
extn,
api_version,
)
params_connector = "&"
elif self.typed_api_type == ApiType.OPEN_AI:
base = self.class_url(self.engine, self.api_type, self.api_version)
url = "%s/%s" % (base, extn)
else:
raise error.InvalidAPIType("Unsupported API type %s" % self.api_type)
timeout = self.get("timeout")
if timeout is not None:
timeout = quote_plus(str(timeout))
url += params_connector + "timeout={}".format(timeout)
return url
def wait(self, timeout=None):
start = time.time()
while self.status != "complete":
self.timeout = (
min(timeout + start - time.time(), MAX_TIMEOUT)
if timeout is not None
else MAX_TIMEOUT
)
if self.timeout < 0:
del self.timeout
break
self.refresh()
return self
async def await_(self, timeout=None):
"""Async version of `EngineApiResource.wait`"""
start = time.time()
while self.status != "complete":
self.timeout = (
min(timeout + start - time.time(), MAX_TIMEOUT)
if timeout is not None
else MAX_TIMEOUT
)
if self.timeout < 0:
del self.timeout
break
await self.arefresh()
return self
| [] |
2024-01-10 | kkur0same/slackbot_agent | lazy_model.py | from langchain.chat_models import ChatOpenAI
from langchain.embeddings import HuggingFaceEmbeddings, OpenAIEmbeddings, HuggingFaceInstructEmbeddings
''' lazy model and embeddings loading'''
class LazyModel:
def __init__(self, config):
self.config = config
self._model = None
@property
def model(self):
if self._model is None:
# load model here based on self.config
if self.config['type'] == 'ChatOpenAI':
#self._model = ChatOpenAI(temperature=self.config['temperature'], model_name=self.config['model_name'])
model_kwargs = self.config.copy()
model_kwargs.pop('type', None)
model_kwargs.pop('name', None)
self._model = ChatOpenAI(**model_kwargs)
return self._model
class LazyEmbedding:
def __init__(self, config):
self.config = config
self._embedding = None
@property
def embedding(self):
if self._embedding is None:
if self.config['type'] == 'HuggingFaceInstructEmbeddings':
self._embedding = HuggingFaceInstructEmbeddings(
embed_instruction=self.config.get('embed_instruction'),
query_instruction=self.config.get('query_instruction')
)
elif self.config['type'] == 'OpenAIEmbeddings':
self._embedding = OpenAIEmbeddings()
elif self.config['type'] == 'HuggingFaceEmbeddings':
model_name = self.config.get('model_name')
self._embedding = HuggingFaceEmbeddings(model_name=model_name)
return self._embedding
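# Minimal usage sketch (illustrative only; the config dictionaries below are
# assumptions about what the calling application provides):
#
#   llm_config = {"type": "ChatOpenAI", "name": "default", "temperature": 0.3}
#   llm = LazyModel(llm_config).model   # ChatOpenAI is only instantiated on first access
#
#   emb_config = {"type": "HuggingFaceEmbeddings", "model_name": "sentence-transformers/all-MiniLM-L6-v2"}
#   embedder = LazyEmbedding(emb_config).embedding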
| [] |
2024-01-10 | jennhu/metalinguistic-prompting | utils~io.py | import json
from time import gmtime, strftime
import argparse
import torch
from . import openai_api, models
# Extremely basic helper functions.
def timestamp():
return strftime("%Y-%m-%d %H:%M:%S", gmtime())
def dict2json(d, out_file):
with open(out_file, "w") as fp:
json.dump(d, fp, indent=2)
def json2dict(in_file):
with open(in_file, "r") as fp:
d = json.load(fp)
return d
# Helper function for parsing command-line arguments.
def parse_args():
parser = argparse.ArgumentParser(description="Evaluate model using specified prompts")
parser.add_argument("--model", "-M", type=str, help="Name of model")
parser.add_argument("--model_type", type=str, choices=["openai", "hf"])
parser.add_argument("--key", "-K", type=str, default="key.txt",
help="Path to file with secret OpenAI API key")
parser.add_argument("--seed", "-S", type=int, default=0,
help="Random seed for reproducibility")
parser.add_argument("--eval_type", type=str, default="direct",
choices=[
"direct",
"metaQuestionSimple",
"metaInstruct",
"metaQuestionComplex"
],
help="Type of evaluation (for prompt design)")
parser.add_argument("--option_order", type=str, default="goodFirst",
choices=["goodFirst", "badFirst"]),
parser.add_argument("--data_file", type=str,
help="Path to data containing prefixes for next-word prediction task")
parser.add_argument("--out_file", type=str,
help="Path to save output JSON file")
parser.add_argument("--dist_folder", type=str, default=None,
help="(OPTIONAL) path to folder to save distribution files (as .npy)")
args = parser.parse_args()
return args
# Helper function for initializing models.
def initialize_model(args):
# Set device to GPU if cuda is available.
if torch.cuda.is_available():
device = torch.device("cuda")
print("Set device to CUDA")
else:
device = torch.device("cpu")
print("Using CPU (CUDA unvailable); adjust your expectations")
# Initialize model based on model type and name.
if args.model_type == "openai":
# Secret file with API key (DO NOT commit this)
openai_api.set_key_from_file(args.key)
model = models.OpenAI_LLM(args.eval_type, args.model, args.seed)
else:
if "flan-t5" in args.model:
model = models.T5_LLM(args.eval_type, args.model, args.seed, device=device)
else:
raise ValueError(
f"Model not supported! (Your model: {args.model})"
)
return model | [] |
2024-01-10 | AI-Jie01/genai | genai~generate.py | import openai
NOTEBOOK_CREATE_NEXT_CELL_PROCLAMATION = """
As a coding assistant, your task is to help users write code in Python within Jupyter Notebooks. Provide comments and code for the user to read and edit, ensuring it can be run successfully. The user will be able to run the code in the cell and see the output.
""".strip() # noqa: E501
NOTEBOOK_ERROR_DIAGNOSER_PROCLAMATION = """
As a coding assistant, you'll diagnose errors in Python code written in a Jupyter Notebook. Format your response using markdown. Making sure to include the language around code blocks, like
```python
# code
```
Provide concise code examples in your response which will be rendered in Markdown in the notebook. The user will not be able to respond to your response.
""".strip() # noqa: E501
def content(completion):
return completion["choices"][0]["message"]["content"]
def deltas(completion):
for chunk in completion:
delta = chunk["choices"][0]["delta"]
if "content" in delta:
yield delta["content"]
def generate_next_cell(
context, # List[Dict[str, str]]
text,
stream=False,
):
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
# Establish the context in which GPT will respond
{
"role": "system",
"content": NOTEBOOK_CREATE_NEXT_CELL_PROCLAMATION,
},
# In, Out
*context,
# The user code/text
{
"role": "user",
"content": text,
},
],
stream=stream,
)
if stream:
yield from deltas(response)
else:
yield content(response)
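# Illustrative usage sketch (assumes `openai.api_key` is already configured; the
# context and prompt shown are hypothetical):
#
#   context = [{"role": "user", "content": "import pandas as pd"}]
#   suggestion = "".join(generate_next_cell(context, "load data.csv into a dataframe"))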
def generate_exception_suggestion(
# The user's code
code,
# The exception with traceback
etype,
evalue,
plaintext_traceback,
stream=False,
):
# Cap our error report at ~1024 characters
error_report = f"{etype.__name__}: {evalue}\n{plaintext_traceback}"
if len(error_report) > 1024:
error_report = error_report[:1024] + "\n..."
messages = []
messages.append(
# Establish the context in which GPT will respond with role: assistant
{
"role": "system",
"content": NOTEBOOK_ERROR_DIAGNOSER_PROCLAMATION,
},
)
if code is not None:
messages.append(
# The user sent code
{"role": "user", "content": code}
)
messages.append(
# The system wrote back with the error
{
"role": "system",
"content": error_report,
},
)
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=messages,
stream=stream,
)
if stream:
yield from deltas(response)
else:
yield content(response)
| [] |
2024-01-10 | dSupertramp/cheshire-cat | core~cat~looking_glass~agent_manager.py | from typing import List
from cat.utils import log
from langchain.agents import Tool, AgentExecutor, ConversationalAgent, load_tools
from langchain.chains import LLMChain
class AgentManager:
def __init__(self, llm, tools: List[Tool], verbose=False):
self.verbose = verbose
self.llm = llm
self.set_tools(tools)
def set_tools(self, tools: List[Tool]):
default_tools_name = ["llm-math", "python_repl", "terminal"]
default_tools = load_tools(default_tools_name, llm=self.llm)
self.tools = tools + default_tools
self.tool_names = [t.name for t in self.tools]
def get_agent_executor(
self,
prefix_prompt: str,
suffix_prompt: str,
# ai_prefix: str,
# human_prefix: str,
input_variables: List[str],
return_intermediate_steps: bool,
):
prompt = ConversationalAgent.create_prompt(
self.tools,
prefix=prefix_prompt,
suffix=suffix_prompt,
ai_prefix="AI",
human_prefix="Human",
input_variables=input_variables,
)
if self.verbose:
log("Using prompt template:")
log(prompt.template)
# main chain
agent_chain = LLMChain(prompt=prompt, llm=self.llm, verbose=self.verbose)
# init agent
agent = ConversationalAgent(
llm_chain=agent_chain,
allowed_tools=self.tool_names,
verbose=self.verbose,
)
# agent executor
agent_executor = AgentExecutor.from_agent_and_tools(
agent=agent,
tools=self.tools,
return_intermediate_steps=return_intermediate_steps,
verbose=self.verbose,
)
return agent_executor
| [
"Human"
] |
2024-01-10 | dSupertramp/cheshire-cat | core~cat~mad_hatter~core_plugin~hooks~models.py | import os
import cat.factory.llm as llms
import cat.factory.embedder as embedders
from cat.db import crud
from langchain.llms import OpenAI, OpenAIChat
from cat.mad_hatter.decorators import hook
@hook(priority=0)
def get_language_model(cat):
selected_llm = crud.get_setting_by_name(next(cat.db()), name="llm_selected")
if selected_llm is None:
# return default LLM
llm = llms.LLMDefaultConfig.get_llm_from_config({})
else:
# get LLM factory class
selected_llm_class = selected_llm.value["name"]
FactoryClass = getattr(llms, selected_llm_class)
# obtain configuration and instantiate LLM
selected_llm_config = crud.get_setting_by_name(
next(cat.db()), name=selected_llm_class
)
llm = FactoryClass.get_llm_from_config(selected_llm_config.value)
return llm
"""
if "OPENAI_KEY" in os.environ:
llm = llms.LLMOpenAIChatConfig.get_llm_from_config(
{
"openai_api_key": os.environ["OPENAI_KEY"],
# "model_name": "gpt-3.5-turbo" # TODO: allow optional kwargs
}
)
elif "COHERE_KEY" in os.environ:
llm = llms.LLMCohereConfig.get_llm_from_config(
{"cohere_api_key": os.environ["COHERE_KEY"], "model": "command"}
)
elif "HF_TOKEN" in os.environ:
if "HF_CHECKPOINT" in os.environ:
llm = llms.LLMHuggingFaceHubConfig.get_llm_from_config(
{
"huggingfacehub_api_token": os.environ["HF_TOKEN"],
"repo_id": os.environ["HF_CHECKPOINT"],
}
)
elif "HF_ENDPOINT_URL" in os.environ:
llm = llms.LLMHuggingFaceEndpointConfig.get_llm_from_config(
{
"huggingfacehub_api_token": os.environ["HF_TOKEN"],
"endpoint_url": os.environ["HF_ENDPOINT_URL"],
}
)
else:
llm = llms.LLMHuggingFaceHubConfig.get_llm_from_config(
{
"huggingfacehub_api_token": os.environ["HF_TOKEN"],
"repo_id": "google/flan-t5-xl",
}
)
else:
llm = llms.LLMDefaultConfig.get_llm_from_config({})
return llm
"""
@hook(priority=0)
def get_language_embedder(cat):
    # TODO: give more example configurations
# Embedding LLM
using_openai_llm = type(cat.llm) in [OpenAI, OpenAIChat]
if ("OPENAI_KEY" in os.environ) or using_openai_llm:
openai_key = os.getenv("OPENAI_KEY")
if openai_key is None:
openai_key = cat.llm.openai_api_key
embedder = embedders.EmbedderOpenAIConfig.get_embedder_from_config(
{
"openai_api_key": openai_key,
# model_name: '....' # TODO: allow optional kwargs
}
)
elif "COHERE_KEY" in os.environ:
embedder = embedders.EmbedderCohereConfig.get_embedder_from_config(
{"cohere_api_key": os.environ["COHERE_KEY"]}
)
elif "HF_TOKEN" in os.environ:
if "HF_EMBEDDER" in os.environ:
embedder = embedders.EmbedderHuggingFaceHubConfig.get_embedder_from_config(
{
"huggingfacehub_api_token": os.environ["HF_TOKEN"],
"repo_id": os.environ["HF_EMBEDDER"],
}
)
else:
embedder = embedders.EmbedderHuggingFaceHubConfig.get_embedder_from_config(
{
"huggingfacehub_api_token": os.environ["HF_TOKEN"],
# repo_id: "..." TODO: at the moment use default
}
)
else:
embedder = embedders.EmbedderFakeConfig.get_embedder_from_config(
{"size": 1536} # mock openai embedding size
)
return embedder
| [] |
2024-01-10 | dSupertramp/cheshire-cat | core~cat~mad_hatter~decorators.py | from typing import Any, List, Union, Callable
from inspect import signature
from langchain.tools import BaseTool
from langchain.agents import Tool
# Cat hooks manager
class CatHooks:
__hooks: List = []
@classmethod
def reset_hook_list(cls):
CatHooks.__hooks = []
@classmethod
def sort_hooks(cls):
# CatHooks.__hooks.sort(key=lambda x: x.count, reverse=True)
CatHooks.__hooks.sort(key=lambda x: x["priority"], reverse=True)
return CatHooks.__hooks
# append a hook
@classmethod
def add_hook(cls, hook):
CatHooks.__hooks.append(hook)
# get hook list
@classmethod
def get_hook_list(cls):
return CatHooks.__hooks
# @hook decorator. Any function in a plugin decorated by @hook and named properly (among list of available hooks) is used by the Cat
# @hook priority defaults to 1, the higher the more important. Hooks in the default core plugin have all priority=0 so they are automatically overwritten from plugins
def hook(_func=None, priority=1) -> Any:
def decorator(func):
def cat_hook_wrapper(*args, **kargs):
return func(*args, **kargs)
doc_string = func.__doc__
if doc_string is None:
doc_string = ""
CatHooks.add_hook(
{
"hook_function": cat_hook_wrapper,
"hook_name": func.__name__,
"docstring": func.__doc__,
"priority": float(priority),
"count": len(CatHooks.get_hook_list()),
}
)
if _func is None:
return decorator
else:
return decorator(_func)
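# Illustrative plugin usage sketch (not part of the core codebase; the hook name and
# signature below are assumptions based on how hooks are consumed elsewhere):
#
#   @hook(priority=2)
#   def get_main_prompt_prefix(cat):
#       return "You are the Cheshire Cat, a curious and helpful assistant."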
# All @tool decorated functions in plugins become a CatTool.
# The difference between base langchain Tool and CatTool is that CatTool has an instance of the cat as attribute (set by the MadHatter)
class CatTool(Tool):
# used by the MadHatter while loading plugins in order to let a Tool access the cat instance
def set_cat_instance(self, cat_instance):
self.cat = cat_instance
def _run(self, input_by_llm):
return self.func(input_by_llm, cat=self.cat)
async def _arun(self, input_by_llm):
# should be used for async Tools, just using sync here
return self._run(input_by_llm)
# override `extra = 'forbid'` for Tool pydantic model in langchain
class Config:
extra = "allow"
# @tool decorator, a modified version of a langchain Tool that also takes a Cat instance as argument
# adapted from https://github.com/hwchase17/langchain/blob/master/langchain/agents/tools.py
def tool(*args: Union[str, Callable], return_direct: bool = False) -> Callable:
"""Make tools out of functions, can be used with or without arguments.
Requires:
- Function must be of type (str) -> str
- Function must have a docstring
Examples:
.. code-block:: python
@tool
def search_api(query: str) -> str:
# Searches the API for the query.
return
@tool("search", return_direct=True)
def search_api(query: str) -> str:
# Searches the API for the query.
return
"""
def _make_with_name(tool_name: str) -> Callable:
def _make_tool(func: Callable[[str], str]) -> Tool:
assert func.__doc__, "Function must have a docstring"
# Description example:
# search_api(query: str) - Searches the API for the query.
description = f"{tool_name}{signature(func)} - {func.__doc__.strip()}"
tool_ = CatTool(
name=tool_name,
func=func,
description=description,
return_direct=return_direct,
)
return tool_
return _make_tool
if len(args) == 1 and isinstance(args[0], str):
# if the argument is a string, then we use the string as the tool name
# Example usage: @tool("search", return_direct=True)
return _make_with_name(args[0])
elif len(args) == 1 and callable(args[0]):
# if the argument is a function, then we use the function name as the tool name
# Example usage: @tool
return _make_with_name(args[0].__name__)(args[0])
elif len(args) == 0:
# if there are no arguments, then we use the function name as the tool name
# Example usage: @tool(return_direct=True)
def _partial(func: Callable[[str], str]) -> BaseTool:
return _make_with_name(func.__name__)(func)
return _partial
else:
raise ValueError("Too many arguments for tool decorator")
| [] |
2024-01-10 | dSupertramp/cheshire-cat | core~cat~looking_glass~cheshire_cat.py | import time
import traceback
from typing import Union
from datetime import timedelta
import langchain
from cat.utils import log
from cat.utils import verbal_timedelta
from cat.db.database import get_db_session, create_db_and_tables
from cat.rabbit_hole import RabbitHole
from starlette.datastructures import UploadFile
from cat.mad_hatter.mad_hatter import MadHatter
from langchain.chains.summarize import load_summarize_chain
from cat.memory.long_term_memory import LongTermMemory
from langchain.docstore.document import Document
from cat.looking_glass.agent_manager import AgentManager
# main class
class CheshireCat:
def __init__(self, verbose=True):
self.verbose = verbose
# access to DB
self.load_db()
# bootstrap the cat!
self.bootstrap()
# queue of cat messages not directly related to last user input
# i.e. finished uploading a file
self.web_socket_notifications = []
def bootstrap(self):
"""This method is called when the cat is instantiated and
has to be called whenever LLM, embedder, agent or memory need to be reinstantiated
(for example an LLM change at runtime)
"""
self.load_plugins()
self.load_natural_language()
self.load_memory()
self.load_agent()
# Rabbit Hole Instance
self.rabbit_hole = RabbitHole()
def load_db(self):
# if there is no db, create it
create_db_and_tables()
# access db from instance
self.db = get_db_session
def load_natural_language(self):
# LLM and embedder
self.llm = self.mad_hatter.execute_hook("get_language_model")
self.embedder = self.mad_hatter.execute_hook("get_language_embedder")
# Prompts
self.prefix_prompt = self.mad_hatter.execute_hook("get_main_prompt_prefix")
self.suffix_prompt = self.mad_hatter.execute_hook("get_main_prompt_suffix")
# HyDE chain
hypothesis_prompt = langchain.PromptTemplate(
input_variables=["input"],
template=self.mad_hatter.execute_hook("get_hypothetical_embedding_prompt"),
)
self.hypothetis_chain = langchain.chains.LLMChain(
prompt=hypothesis_prompt, llm=self.llm, verbose=True
)
self.summarization_prompt = self.mad_hatter.execute_hook("get_summarization_prompt")
# custom summarization chain
self.summarization_chain = langchain.chains.LLMChain(
llm=self.llm,
verbose=False,
prompt=langchain.PromptTemplate(
template=self.summarization_prompt, input_variables=["text"]
),
)
# TODO: can input vars just be deducted from the prompt? What about plugins?
self.input_variables = [
"input",
"chat_history",
"episodic_memory",
"declarative_memory",
"agent_scratchpad",
]
def load_memory(self):
# Memory
vector_memory_config = {"embedder": self.embedder, "verbose": True}
self.memory = LongTermMemory(vector_memory_config=vector_memory_config)
def load_plugins(self):
# recent conversation # TODO: load from episodic memory latest conversation messages
self.history = ""
# Load plugin system
self.mad_hatter = MadHatter(self)
def load_agent(self):
self.agent_manager = AgentManager(
llm=self.llm,
tools=self.mad_hatter.tools,
verbose=self.verbose,
) # TODO: load agent from plugins? It's gonna be a MESS
self.agent_executor = self.agent_manager.get_agent_executor(
prefix_prompt=self.prefix_prompt,
suffix_prompt=self.suffix_prompt,
# ai_prefix="AI",
# human_prefix="Human",
input_variables=self.input_variables,
return_intermediate_steps=True,
)
# TODO: this should be a hook
def format_memories_for_prompt(self, memory_docs, return_format=str):
memory_texts = [m[0].page_content.replace("\n", ". ") for m in memory_docs]
# TODO: take away duplicates
memory_timestamps = []
for m in memory_docs:
timestamp = m[0].metadata["when"]
delta = timedelta(seconds=(time.time() - timestamp))
memory_timestamps.append(" ("+verbal_timedelta(delta)+")")
memory_texts = [a+b for a,b in zip(memory_texts,memory_timestamps)]
# TODO: insert sources in document memories
if return_format == str:
memories_separator = "\n - "
memory_content = memories_separator + memories_separator.join(memory_texts)
else:
memory_content = memory_texts
if self.verbose:
log(memory_content)
return memory_content
def get_hyde_text_and_embedding(self, text):
# HyDE text
hyde_text = self.hypothetis_chain.run(text)
if self.verbose:
log(hyde_text)
# HyDE embedding
hyde_embedding = self.embedder.embed_query(hyde_text)
return hyde_text, hyde_embedding
# iterative summarization
def get_summary_text(self, docs, group_size=3):
# service variable to store intermediate results
intermediate_summaries = docs
# we will store iterative summaries all together in a list
all_summaries = []
# loop until there are no groups to summarize
root_summary_flag = False
while not root_summary_flag:
# make summaries of groups of docs
intermediate_summaries = [
self.summarization_chain.run(intermediate_summaries[i : i + group_size])
for i in range(0, len(intermediate_summaries), group_size)
]
intermediate_summaries = [
Document(page_content=summary) for summary in intermediate_summaries
]
# update list of all summaries
all_summaries = intermediate_summaries + all_summaries
# did we reach root summary?
root_summary_flag = len(intermediate_summaries) == 1
if self.verbose:
log(
f"Building summaries over {len(intermediate_summaries)} chunks. Please wait."
)
# return root summary and all intermediate summaries
return all_summaries[0], all_summaries[1:]
def send_file_in_rabbit_hole(
self,
file: Union[str, UploadFile],
chunk_size: int = 400,
chunk_overlap: int = 100,
):
"""
Load a given file in the Cat's memory.
:param file: absolute path of the file or UploadFile if ingested from the GUI
:param chunk_size: number of characters the text is split in
:param chunk_overlap: number of overlapping characters between consecutive chunks
"""
# split file into a list of docs
docs = RabbitHole.file_to_docs(
file=file, chunk_size=chunk_size, chunk_overlap=chunk_overlap
)
# get summaries
summary, intermediate_summaries = self.get_summary_text(docs)
docs = [summary] + intermediate_summaries + docs
# store in memory
if isinstance(file, str):
filename = file
else:
filename = file.filename
RabbitHole.store_documents(ccat=self, docs=docs, source=filename)
def send_url_in_rabbit_hole(
self,
url: str,
chunk_size: int = 400,
chunk_overlap: int = 100,
):
"""
Load a given website in the Cat's memory.
:param url: URL of the website to which you want to save the content
:param chunk_size: number of characters the text is split in
:param chunk_overlap: number of overlapping characters between consecutive chunks
"""
# get website content and split into a list of docs
docs = RabbitHole.url_to_docs(
url=url, chunk_size=chunk_size, chunk_overlap=chunk_overlap
)
        # get summaries and store
summary, intermediate_summaries = self.get_summary_text(docs)
docs = [summary] + intermediate_summaries + docs
RabbitHole.store_documents(ccat=self, docs=docs, source=url)
def __call__(self, user_message):
if self.verbose:
log(user_message)
hyde_text, hyde_embedding = self.get_hyde_text_and_embedding(user_message)
try:
# recall relevant memories (episodic)
episodic_memory_content = (
self.memory.vectors.episodic.recall_memories_from_embedding(
embedding=hyde_embedding
)
)
log(episodic_memory_content)
episodic_memory_formatted_content = self.format_memories_for_prompt(
episodic_memory_content
)
# recall relevant memories (declarative)
declarative_memory_content = (
self.memory.vectors.declarative.recall_memories_from_embedding(
embedding=hyde_embedding
)
)
declarative_memory_formatted_content = self.format_memories_for_prompt(
declarative_memory_content
)
except Exception as e:
log(e)
traceback.print_exc(e)
return {
"error": False,
# TODO: Otherwise the frontend gives notice of the error but does not show what the error is
"content": "Vector memory error: you probably changed Embedder and old vector memory is not compatible. Please delete `core/long_term_memory` folder",
"why": {},
}
# reply with agent
try:
cat_message = self.agent_executor(
{
"input": user_message,
"episodic_memory": episodic_memory_formatted_content,
"declarative_memory": declarative_memory_formatted_content,
"chat_history": self.history,
}
)
except ValueError as e:
# This error happens when the LLM does not respect prompt instructions.
            # We grab the LLM output here anyway, so small and non-instruction-fine-tuned models can still be used.
error_description = str(e)
if not error_description.startswith("Could not parse LLM output: `"):
raise e
unparsable_llm_output = error_description.removeprefix(
"Could not parse LLM output: `"
).removesuffix("`")
cat_message = {"output": unparsable_llm_output}
if self.verbose:
log(cat_message)
# update conversation history
self.history += f"Human: {user_message}\n"
self.history += f'AI: {cat_message["output"]}\n'
# store user message in episodic memory
# TODO: vectorize and store also conversation chunks (not raw dialog, but summarization)
_ = self.memory.vectors.episodic.add_texts(
[user_message],
[
{
"source": "user",
"when": time.time(),
"text": user_message,
}
],
)
# build data structure for output (response and why with memories)
final_output = {
"error": False,
"content": cat_message["output"],
"why": {
**cat_message,
"memory": {
"vectors": {
"episodic": [
dict(d[0]) | {"score": float(d[1])}
for d in episodic_memory_content
],
"declarative": [
dict(d[0]) | {"score": float(d[1])}
for d in declarative_memory_content
],
}
},
},
}
final_output = self.mad_hatter.execute_hook("before_returning_response_to_user", final_output)
return final_output
| [
"Vector memory error: you probably changed Embedder and old vector memory is not compatible. Please delete `core/long_term_memory` folder",
"input",
"get_hypothetical_embedding_prompt"
] |
2024-01-10 | dSupertramp/cheshire-cat | core~cat~memory~vector_memory.py | import os
import time
from typing import Any, Callable
from cat.utils import log
from qdrant_client import QdrantClient
from langchain.vectorstores import Qdrant
from qdrant_client.http.models import Distance, VectorParams
class VectorMemory:
def __init__(self, verbose=False, embedder=None) -> None:
self.verbose = verbose
if embedder is None:
raise Exception("No embedder passed to VectorMemory")
self.embedder = embedder
# Qdrant vector DB client
self.vector_db = QdrantClient(
host=os.getenv("VECTOR_MEMORY_HOST", "vector-memory"),
port=int(os.getenv("VECTOR_MEMORY_PORT", 6333)),
)
# Episodic memory will contain user and eventually cat utterances
self.episodic = VectorMemoryCollection(
client=self.vector_db,
collection_name="episodic",
embedding_function=self.embedder.embed_query,
)
# Declarative memory will contain uploaded documents' content (and summaries)
self.declarative = VectorMemoryCollection(
client=self.vector_db,
collection_name="declarative",
embedding_function=self.embedder.embed_query,
)
# Dictionary containing all collections
# Useful for cross-searching and to create/use collections from plugins
self.collections = {"episodic": self.episodic, "declarative": self.declarative}
class VectorMemoryCollection(Qdrant):
def __init__(self, client: Any, collection_name: str, embedding_function: Callable):
super().__init__(client, collection_name, embedding_function)
self.create_collection_if_not_exists()
def create_collection_if_not_exists(self):
# create collection if it does not exist
try:
self.client.get_collection(self.collection_name)
tabula_rasa = False
log(f'Collection "{self.collection_name}" already present in vector store')
        except Exception:
log(f"Creating collection {self.collection_name} ...")
self.client.recreate_collection(
collection_name=self.collection_name,
vectors_config=VectorParams(size=1536, distance=Distance.COSINE),
# TODO: if we change the embedder, how do we know the dimensionality?
)
tabula_rasa = True
# TODO: if the embedder changed, a new vectorstore must be created
if tabula_rasa:
# insert first point in the collection
self.add_texts(
["I am the Cheshire Cat"],
[
{
"source": "cheshire-cat",
"when": time.time(),
"text": "I am the Cheshire Cat",
}
],
)
log(dict(self.client.get_collection(self.collection_name)))
# retrieve similar memories from text
def recall_memories_from_text(self, text, metadata=None, k=5):
# embed the text
query_embedding = self.embedding_function(text)
# search nearest vectors
return self.recall_memories_from_embedding(
query_embedding, metadata=metadata, k=k
)
# retrieve similar memories from embedding
def recall_memories_from_embedding(self, embedding, metadata=None, k=5):
# retrieve memories
memories = self.client.search(
collection_name=self.collection_name,
query_vector=embedding,
query_filter=self._qdrant_filter_from_dict(metadata),
with_payload=True,
limit=k,
)
return [
(
self._document_from_scored_point(
m, self.content_payload_key, self.metadata_payload_key
),
m.score,
)
for m in memories
]
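# Illustrative usage sketch (not part of the original file; assumes a reachable
# Qdrant instance and an embedder exposing `embed_query`):
#
#   memory = VectorMemory(embedder=my_embedder, verbose=True)
#   hits = memory.episodic.recall_memories_from_text("what did I say about cats?", k=3)
#   for doc, score in hits:
#       print(score, doc.page_content)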
| [] |
2024-01-10 | dSupertramp/cheshire-cat | core~cat~rabbit_hole.py | import os
import time
import tempfile
import mimetypes
from typing import List, Union
from cat.utils import log
from langchain.text_splitter import RecursiveCharacterTextSplitter
from starlette.datastructures import UploadFile
from langchain.document_loaders import (
PDFMinerLoader,
UnstructuredFileLoader,
UnstructuredMarkdownLoader,
UnstructuredURLLoader,
)
from langchain.docstore.document import Document
class RabbitHole:
def __init__(self):
pass
@staticmethod
def url_to_docs(
url: str,
chunk_size: int = 400,
chunk_overlap: int = 100,
) -> List[Document]:
"""
Scrape website content and chunk it to a list of Documents.
:param url: URL of the website to which you want to save the content
:param chunk_size: number of characters the text is split in
:param chunk_overlap: number of overlapping characters between consecutive chunks
"""
# load text content of the website
loader = UnstructuredURLLoader(urls=[url])
text = loader.load()
        # split into documents using chunk_size and chunk_overlap
text_splitter = RecursiveCharacterTextSplitter(
chunk_size=chunk_size,
chunk_overlap=chunk_overlap,
separators=["\\n\\n", "\n\n", ".\\n", ".\n", "\\n", "\n", " ", ""],
)
docs = text_splitter.split_documents(text)
return docs
@staticmethod
def file_to_docs(
file: Union[str, UploadFile],
chunk_size: int = 400,
chunk_overlap: int = 100,
) -> List[Document]:
"""
Parse a file and chunk it to a list of Documents.
The file can either be ingested from the web GUI or using the Cat *send_file_in_rabbit_hole* method.
:param file: absolute path of the file or UploadFile if ingested from the GUI
:param chunk_size: number of characters the text is split in
:param chunk_overlap: number of overlapping characters between consecutive chunks
"""
# Create temporary file
temp_file = tempfile.NamedTemporaryFile(dir=".", delete=False)
temp_name = temp_file.name
# Check type of incoming file.
        # It can be either an UploadFile if coming from the GUI or an absolute path if auto-ingested by the Cat
if isinstance(file, UploadFile):
# Get mime type of UploadFile
# content_type = file.content_type
content_type = mimetypes.guess_type(file.filename)[0]
# Get file bytes
file_bytes = file.file.read()
elif isinstance(file, str):
# Get mime type from file extension
content_type = mimetypes.guess_type(file)[0]
# Get file bytes
with open(file, "rb") as f:
file_bytes = f.read()
else:
raise ValueError(f"{type(file)} is not a valid type.")
# Open temp file in binary write mode
with open(temp_name, "wb") as temp_binary_file:
# Write bytes to file
temp_binary_file.write(file_bytes)
# decide loader
if content_type == "text/plain":
loader = UnstructuredFileLoader(temp_name)
elif content_type == "text/markdown":
loader = UnstructuredMarkdownLoader(temp_name)
elif content_type == "application/pdf":
loader = PDFMinerLoader(temp_name)
else:
raise Exception("MIME type not supported for upload")
# extract text from file
text = loader.load()
# delete tmp file
os.remove(temp_name)
text_splitter = RecursiveCharacterTextSplitter(
chunk_size=chunk_size,
chunk_overlap=chunk_overlap,
separators=["\\n\\n", "\n\n", ".\\n", ".\n", "\\n", "\n", " ", ""],
)
docs = text_splitter.split_documents(text)
log(f"Preparing to clean {len(docs)} text chunks")
# remove short texts (page numbers, isolated words, etc.)
docs = list(filter(lambda d: len(d.page_content) > 10, docs))
return docs
@staticmethod # should this method be inside of ccat?
def store_documents(ccat, docs: List[Document], source: str) -> None:
"""
Load a list of Documents in the Cat's declarative memory.
:param ccat: reference to the cat instance
:param docs: a list of documents to store in memory
:param source: a string representing the source, either the file name or the website URL
"""
log(f"Preparing to memorize {len(docs)} vectors")
# classic embed
for d, doc in enumerate(docs):
_ = ccat.memory.vectors.declarative.add_texts(
[doc.page_content],
[
{
"source": source,
"when": time.time(),
"text": doc.page_content,
}
],
)
log(f"Inserted into memory ({d + 1}/{len(docs)}): {doc.page_content}")
time.sleep(0.1)
# notify client
ccat.web_socket_notifications.append(
{
"error": False,
"content": f"Finished reading {source}, I made {len(docs)} thoughts on it.",
"why": {},
}
)
log("Done uploading")
| [
"Finished reading PLACEHOLDER, I made 1 thoughts on it."
] |
2024-01-10 | fariiaakh/datasets | datasets~openwebtext~openwebtext.py | # coding=utf-8
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The Open WebText Corpus"""
import os
import re
from itertools import chain
import datasets
_CITATION = """\
@misc{Gokaslan2019OpenWeb,
title={OpenWebText Corpus},
author={Aaron Gokaslan*, Vanya Cohen*, Ellie Pavlick, Stefanie Tellex},
howpublished{\\url{http://Skylion007.github.io/OpenWebTextCorpus}},
year={2019}
}
"""
_DESCRIPTION = """\
An open-source replication of the WebText dataset from OpenAI.
"""
_URL = "https://zenodo.org/record/3834942/files/openwebtext.tar.xz"
class Openwebtext(datasets.GeneratorBasedBuilder):
"""The Open WebText dataset."""
BUILDER_CONFIGS = [
datasets.BuilderConfig(
name="plain_text",
description="Plain text",
version=datasets.Version("1.0.0"),
)
]
def _info(self):
return datasets.DatasetInfo(
description=_DESCRIPTION,
features=datasets.Features({"text": datasets.Value("string")}),
homepage="https://skylion007.github.io/OpenWebTextCorpus/",
citation=_CITATION,
)
def _split_generators(self, dl_manager):
dl_dir = dl_manager.download_and_extract(_URL)
owt_dir = os.path.join(dl_dir, "openwebtext")
subset_xzs = [
os.path.join(owt_dir, file_name)
for file_name in sorted(os.listdir(owt_dir))
if file_name.endswith("xz") # filter out ...xz.lock
]
ex_dirs = dl_manager.extract(subset_xzs, num_proc=round(os.cpu_count() * 0.75))
nested_txt_files = [
[
os.path.join(ex_dir, txt_file_name)
for txt_file_name in sorted(os.listdir(ex_dir))
if txt_file_name.endswith("txt")
]
for ex_dir in ex_dirs
]
txt_files = chain(*nested_txt_files)
return [
datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"txt_files": txt_files}),
]
def _generate_examples(self, txt_files):
"""Yields examples."""
for idx, filepath in enumerate(txt_files):
with open(filepath, encoding="utf-8") as f:
yield idx, {"text": re.sub("\n\n\n+", "\n\n", f.read()).strip()}
| [] |
2024-01-10 | MistaAsh/Donna | middleware~constants.py | from langchain.schema import SystemMessage
from dotenv import load_dotenv
import os
load_dotenv()
# Web3 Constants
WEB3_HTTP_PROVIDER_URI = os.environ.get("WEB3_HTTP_PROVIDER_URI")
# OpenAI Constants
OPENAI_API_KEY = os.environ.get("OPENAI_API_KEY")
# Supabase Constants
SUPABASE_URL = os.environ.get("SUPABASE_URL")
SUPABASE_KEY = os.environ.get("SUPABASE_KEY")
# Airstack Constants
AIRSTACK_API_KEY = os.environ.get("AIRSTACK_API_KEY")
# ChatGPT System Prompt
SYSTEM_MESSAGE = SystemMessage(
content="""
You are a specialized AI, designed to act as an chatbot that help facilitate blockchain transaction
-- When asked to generate a contract invoke the generate contract function while passing in the english description of the task
"""
)
AGENT_KWARGS = {"system_message": SYSTEM_MESSAGE}
RPC_URL = {
"1": "https://cloudflare-eth.com",
"137": "https://polygon-rpc.com/",
"5000": "https://rpc.mantle.xyz/",
"8453": "https://base.drpc.org",
# "goerli": "https://goerli.drpc.org/",
# "optimism": "https://mainnet.optimism.io",
# "gnosis": "https://gnosis.drpc.org/",
# "mumbai": "https://polygon-mumbai.drpc.org/",
# "mantle": "https://mantle-testnet.drpc.org/",
# "arbitrum": "https://arbitrum.drpc.org/",
}
ERC20_SYMBOL_TO_ADDRESS = {
"1": {
"ETH": "ETH",
"WETH": "0xc02aaa39b223fe8d0a0e5c4f27ead9083c756cc2",
"USDT": "0xdac17f958d2ee523a2206206994597c13d831ec7",
"USDC": "0xa0b86991c6218b36c1d19d4a2e9eb0ce3606eb48",
"MATIC": "0x7d1afa7b718fb893db30a3abc0cfc608aacfebb0",
"DAI": "0x6b175474e89094c44da98b954eedeac495271d0f",
},
"137": {
"WETH": "0x7ceB23fD6bC0adD59E62ac25578270cFf1b9f619",
"WMATIC": "0x0d500B1d8E8eF31E21C99d1Db9A6444d3ADf1270",
},
"5000": {
},
"8453": {
},
}
| [
"\n You are a specialized AI, designed to act as an chatbot that help facilitate blockchain transaction\n -- When asked to generate a contract invoke the generate contract function while passing in the english description of the task\n "
] |
2024-01-10 | MistaAsh/Donna | middleware~tools~contract.py | from openai import OpenAI
from constants import OPENAI_API_KEY
class Contract:
"""
Middleware to handle contract related requests
"""
def create_and_deploy_contract(self, contract_name, contract_description):
"""
Calls the OpenAI API to generate a contract from the contract and calls a server to deploy it
"""
error, payload = False, {}
try:
client = OpenAI(
api_key = OPENAI_API_KEY,
)
prompt = f"""
Generate a Solidity smart contract with the following description:
Contract Name: {contract_name}
Contract Description: {contract_description}
When generating the constructor DO NOT TAKE ANY PARAMETERS. e.g: constructor()
Additionally,
1. The contract should always have this line at the top of the file: `// SPDX-License-Identifier: MIT`
2. The contract code should be enclosed in a ```solidity ``` code block
3. Don't use openzepplin counter or ownable contracts
"""
chat_completion = client.chat.completions.create(
messages=[
{
"role": "system",
"content": prompt,
}
],
model="gpt-4-1106-preview",
)
payload = chat_completion.choices[0].message.content
except Exception as e:
error = e
return {"method": "create_and_deploy_tcontract", "error": error, "payload": payload}
| [
"\n Generate a Solidity smart contract with the following description:\n Contract Name: PLACEHOLDER\n Contract Description: PLACEHOLDER\n When generating the constructor DO NOT TAKE ANY PARAMETERS. e.g: constructor()\n Additionally,\n 1. The contract should always have this line at the top of the file: `// SPDX-License-Identifier: MIT`\n 2. The contract code should be enclosed in a ```solidity ``` code block\n 3. Don't use openzepplin counter or ownable contracts\n "
] |
2024-01-10 | MistaAsh/Donna | middleware~imports.py | from tools.account import Account
from tools.socials import Socials
from tools.contract import Contract
from constants import *
from schema import *
from flask import Flask, jsonify, request
from pydantic import BaseModel
from typing import Type
from concurrent.futures import ThreadPoolExecutor
from supabase import create_client, Client
from web3 import Web3
from langchain.agents import AgentType, initialize_agent
from langchain.chat_models import ChatOpenAI
from langchain.tools import BaseTool
from airstack.execute_query import AirstackClient
import re, json, asyncio
import requests | [] |
2024-01-10 | ambarishg/aws-open-search | azure_openai_helper.py | import openai
key = 'YOUR_KEY'
location = 'YOUR_LOCATION'
endpoint = 'YOUR_ENDPOINT'
openai.api_type = "azure"
openai.api_key = key
openai.api_base = endpoint
deployment_id_gpt4='YOUR_DEPLOYMENT_ID'
openai.api_key = key
def create_prompt(context,query):
header = "Answer the question as truthfully as possible using the provided context, and if the answer is not contained within the text and requires some latest information to be updated, print 'Sorry Not Sufficient context to answer query' \n"
return header + context + "\n\n" + query + "\n"
def generate_answer(conversation):
openai.api_version = "2023-03-15-preview"
response = openai.ChatCompletion.create(
engine=deployment_id_gpt4,
messages=conversation,
temperature=0,
max_tokens=1000,
top_p=1,
frequency_penalty=0,
presence_penalty=0,
stop = [' END']
)
return (response['choices'][0]['message']['content']).strip()
def generate_answer_from_context(user_input, context):
conversation=[{"role": "system", "content": "Assistant is a large language model trained by OpenAI."}]
prompt = create_prompt(context,user_input)
conversation.append({"role": "assistant", "content": prompt})
conversation.append({"role": "user", "content": user_input})
reply = generate_answer(conversation)
return reply | [
"Assistant is a large language model trained by OpenAI."
] |
2024-01-10 | filipmazurek/spa-artifact | docker-share~gem5~configs~example~gem5_library~x86-parsec-benchmarks.py | # Copyright (c) 2021 The Regents of the University of California.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
Script to run PARSEC benchmarks with gem5.
The script expects a benchmark program name and the simulation
size. The system is fixed with 2 CPU cores, MESI Two Level system
cache and 3 GB DDR4 memory. It uses the x86 board.
This script will count the total number of instructions executed
in the ROI. It also tracks how much wallclock and simulated time.
Usage:
------
```
scons build/X86/gem5.opt
./build/X86/gem5.opt \
configs/example/gem5_library/x86-parsec-benchmarks.py \
--benchmark <benchmark_name> \
--size <simulation_size>
```
"""
import argparse
import time
import m5
from m5.objects import Root
from gem5.utils.requires import requires
from gem5.components.boards.x86_board import X86Board
from gem5.components.memory import DualChannelDDR4_2400
from gem5.components.processors.simple_switchable_processor import (
SimpleSwitchableProcessor,
)
from gem5.components.processors.cpu_types import CPUTypes
from gem5.isas import ISA
from gem5.coherence_protocol import CoherenceProtocol
from gem5.resources.resource import Resource
from gem5.simulate.simulator import Simulator
from gem5.simulate.exit_event import ExitEvent
# We check for the required gem5 build.
requires(
isa_required=ISA.X86,
coherence_protocol_required=CoherenceProtocol.MESI_TWO_LEVEL,
# kvm_required=True,
)
# Following are the list of benchmark programs for parsec.
benchmark_choices = [
"blackscholes",
"bodytrack",
"canneal",
"dedup",
"facesim",
"ferret",
"fluidanimate",
"freqmine",
"raytrace",
"streamcluster",
"swaptions",
"vips",
"x264",
]
# Following are the input size.
size_choices = ["simsmall", "simmedium", "simlarge"]
parser = argparse.ArgumentParser(
description="An example configuration script to run the npb benchmarks."
)
# The arguments accepted are the benchmark name and the simulation size.
parser.add_argument(
"--benchmark",
type=str,
required=True,
help="Input the benchmark program to execute.",
choices=benchmark_choices,
)
parser.add_argument(
"--size",
type=str,
required=True,
help="Simulation size the benchmark program.",
choices=size_choices,
)
args = parser.parse_args()
# Setting up all the fixed system parameters here
# Caches: MESI Two Level Cache Hierarchy
from gem5.components.cachehierarchies.ruby.mesi_two_level_cache_hierarchy import (
MESITwoLevelCacheHierarchy,
)
cache_hierarchy = MESITwoLevelCacheHierarchy(
l1d_size="32kB",
l1d_assoc=8,
l1i_size="32kB",
l1i_assoc=8,
l2_size="256kB",
l2_assoc=16,
num_l2_banks=2,
)
# Memory: Dual Channel DDR4 2400 DRAM device.
# The X86 board only supports 3 GB of main memory.
memory = DualChannelDDR4_2400(size="3GB")
# Here we setup the processor. This is a special switchable processor in which
# a starting core type and a switch core type must be specified. Once a
# configuration is instantiated a user may call `processor.switch()` to switch
# from the starting core types to the switch core types. In this simulation
# we start with KVM cores to simulate the OS boot, then switch to the Timing
# cores for the command we wish to run after boot.
processor = SimpleSwitchableProcessor(
# starting_core_type=CPUTypes.KVM,
starting_core_type=CPUTypes.TIMING,
switch_core_type=CPUTypes.O3,
isa=ISA.X86,
num_cores=2,
)
# Here we setup the board. The X86Board allows for Full-System X86 simulations
board = X86Board(
clk_freq="3GHz",
processor=processor,
memory=memory,
cache_hierarchy=cache_hierarchy,
)
# Here we set the FS workload, i.e., parsec benchmark
# After the simulation has ended you may inspect
# `m5out/system.pc.com_1.device` to see the stdout, if any.
# After the system boots, we execute the benchmark program and wait till the
# ROI `workbegin` annotation is reached (m5_work_begin()). We start collecting
# the number of committed instructions till ROI ends (marked by `workend`).
# We then finish executing the rest of the benchmark.
# Also, we sleep the system for some time so that the output is printed
# properly.
command = (
"cd /home/gem5/parsec-benchmark;".format(args.benchmark)
+ "source env.sh;"
+ "parsecmgmt -a run -p {} -c gcc-hooks -i {} \
-n {};".format(
args.benchmark, args.size, "2"
)
+ "sleep 5;"
+ "m5 exit;"
)
board.set_kernel_disk_workload(
# The x86 linux kernel will be automatically downloaded to the
# `~/.cache/gem5` directory if not already present.
    # PARSEC benchmarks were tested with kernel version 4.19.83
kernel=Resource(resource_name="x86-linux-kernel-4.19.83"), #, resource_directory="/hpc/group/brownlab/fjm7/downloads/"),
# The x86-parsec image will be automatically downloaded to the
# `~/.cache/gem5` directory if not already present.
disk_image=Resource(resource_name="x86-parsec"), #, resource_directory="/hpc/group/brownlab/fjm7/downloads/"),
readfile_contents=command,
)
# Functions to handle different exit events during the simulation
def handle_workbegin():
    print("Done booting Linux")
    print("Resetting stats at the start of ROI!")
    m5.stats.reset()
    processor.switch()
    # Yielding False tells the simulator to continue running after this exit event.
    yield False
def handle_workend():
    print("Dump stats at the end of the ROI!")
    m5.stats.dump()
    # Yielding True ends this simulation run.
    yield True
simulator = Simulator(
board=board,
on_exit_event={
ExitEvent.WORKBEGIN: handle_workbegin(),
ExitEvent.WORKEND: handle_workend(),
},
)
# We maintain the wall clock time.
globalStart = time.time()
print("Running the simulation")
# print("Using KVM cpu")
print("Using ATOMIC cpu")
m5.stats.reset()
# We start the simulation
simulator.run()
print("All simulation events were successful.")
# We print the final simulation statistics.
print("Done with the simulation")
print()
print("Performance statistics:")
print("Simulated time in ROI: " + ((str(simulator.get_roi_ticks()[0]))))
print(
"Ran a total of", simulator.get_current_tick() / 1e12, "simulated seconds"
)
print(
"Total wallclock time: %.2fs, %.2f min"
% (time.time() - globalStart, (time.time() - globalStart) / 60)
)
| [] |
2024-01-10 | stanford-oval/gpt3-example | neural_worker.py | """
GPT-3 continues a prompt; works with any prompt template stored in .txt format
"""
from typing import List
from tqdm import tqdm
import openai
from multiprocessing import Pool
from functools import partial
import math
class NeuralWorker:
def __init__(self, prompt_template_file: str, engine: str):
self.prompt_template: str = NeuralWorker.load_prompt_template(
prompt_template_file)
self.engine: str = engine
@staticmethod
def load_prompt_template(prompt_template_file: str) -> str:
prompt_template = ''
with open(prompt_template_file) as prompt_template_file:
for line in prompt_template_file:
if line.startswith('#'):
continue # ignore comment lines in the template
prompt_template += line
return prompt_template
def generate(self, input_text: str, args, postprocess=True, max_tries=1) -> str:
"""
text-in-text-out interface to large OpenAI models
"""
# print('input_text = ', input_text)
# don't try multiple times if the temperature is 0, because the results will be the same
if max_tries > 1 and args.temperature == 0:
max_tries = 1
# try at most `max_tries` times to get a non-empty output
for _ in range(max_tries):
generation_output = openai.Completion.create(engine=self.engine,
prompt=input_text,
max_tokens=args.max_tokens,
temperature=args.temperature,
top_p=args.top_p,
frequency_penalty=args.frequency_penalty,
presence_penalty=args.presence_penalty,
best_of=1,
stop=args.stop_tokens,
logprobs=0, # log probability of top tokens
)
# print('raw generation output = ', generation_output)
# print('='*10)
generation_output = generation_output['choices'][0]['text']
generation_output = generation_output.strip()
if postprocess:
generation_output = self._postprocess_generations(
generation_output)
if len(generation_output) > 0:
break
return generation_output
def batch_generate(self, input_texts: List[str], args, postprocess=True, max_tries=1, num_processes=5) -> List[str]:
"""
Call OpenAI's API in parallel, since each call to the biggest model takes ~1 second to return results
"""
f = partial(self.generate, args=args,
postprocess=postprocess, max_tries=max_tries)
with Pool(num_processes) as p:
worker_outputs = list(
tqdm(p.imap(f, input_texts), total=len(input_texts)))
return worker_outputs
def classify(self, input_text: str) -> float:
"""
        Binary classification interface to OpenAI models. The class labels are assumed to be the ' Yes' and ' No' tokens (including the leading space).
        Returns the probability (between 0 and 1) of the positive class (i.e. the ' Yes' label).
"""
# print('input_text = ', input_text)
generation_output = openai.Completion.create(engine=self.engine,
prompt=input_text,
max_tokens=1,
temperature=0,
top_p=1.0,
frequency_penalty=0,
presence_penalty=0,
best_of=1,
logprobs=10, # returns the log probability of this many top tokens
)
# print('raw generation output = ', generation_output)
# print('='*10)
logprobs = generation_output['choices'][0]['logprobs']['top_logprobs'][0]
if ' Yes' not in logprobs and ' No' not in logprobs:
            print('Warning: the logprobs did not contain any of the classification labels.')
pos_log = logprobs.get(' Yes', -10000)
neg_log = logprobs.get(' No', -10000)
return math.exp(pos_log) / (math.exp(pos_log)+math.exp(neg_log))
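    # A worked sketch of the two-way softmax above, with hypothetical numbers: if the
    # top logprobs contain ' Yes' at -0.1 and ' No' at -2.3, then
    # exp(-0.1) / (exp(-0.1) + exp(-2.3)) = 0.905 / (0.905 + 0.100), which is about 0.90,
    # so classify() returns roughly 0.90 as the probability of the positive class.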
def batch_classify(self, input_texts: List[str], num_processes=5) -> List[float]:
"""
        Call OpenAI's API in parallel. This is useful because each call to the biggest model takes ~1 second to return results.
"""
f = partial(self.classify)
with Pool(num_processes) as p:
worker_outputs = list(
tqdm(p.imap(f, input_texts), total=len(input_texts)))
return worker_outputs
def _postprocess_generations(self, generation_output: str) -> str:
"""
Might output an empty string if generation is not at least one full sentence
"""
# replace all whitespaces with a single space
generation_output = ' '.join(generation_output.split())
# remove extra dialog turns, if any
if generation_output.find('You: ') > 0:
generation_output = generation_output[:generation_output.find(
'You: ')]
if generation_output.find('They: ') > 0:
generation_output = generation_output[:generation_output.find(
'They: ')]
# delete half sentences
generation_output = generation_output.strip()
if len(generation_output) == 0:
return generation_output
if generation_output[-1] not in {'.', '!', '?'}:
last_sentence_end = max(generation_output.find(
'.'), generation_output.find('!'), generation_output.find('?'))
if last_sentence_end > 0:
generation_output = generation_output[:last_sentence_end+1]
return generation_output
def fill_prompt_template(self, **prompt_parameter_values):
filled_prompt = self.prompt_template
for parameter, value in prompt_parameter_values.items():
filled_prompt = filled_prompt.replace('{'+parameter+'}', value)
# print('filled_prompt = ', filled_prompt)
return filled_prompt
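# A minimal usage sketch; the prompt file path, engine name, template parameter, and
# generation arguments below are hypothetical stand-ins, and an OpenAI API key is
# assumed to be configured (e.g. via the OPENAI_API_KEY environment variable).
if __name__ == '__main__':
    from argparse import Namespace
    worker = NeuralWorker('prompts/example_prompt.txt', engine='text-davinci-003')
    gen_args = Namespace(max_tokens=64, temperature=0.7, top_p=1.0,
                         frequency_penalty=0.0, presence_penalty=0.0, stop_tokens=None)
    filled_prompt = worker.fill_prompt_template(dialog='Hello there!')
    print(worker.generate(filled_prompt, gen_args))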
| [] |
2024-01-10 | iboraham/assistant-gpt-cli | assistant~api_wrapper.py | import time
from halo import Halo
from openai import OpenAI
class AssistantAPIWrapper:
"""
A wrapper class for the OpenAI API, managing the assistant, threads, and messages.
"""
def __init__(self, api_key, username, assistant_id=None):
"""
Initializes the API client and sets up basic parameters.
"""
self.client = OpenAI(api_key=api_key)
self.thread = None
self.assistant = None
self.run = None
self.username = username
def _convert_tools(self, tools):
"""
Converts a list of tool names into the format required by the OpenAI API.
"""
return [{"type": tool} for tool in tools]
def create_assistant(
self,
name,
description=None,
model="gpt-4-vision-preview",
instructions=None,
tools=[],
):
"""
Creates a new assistant with the specified parameters.
"""
self.assistant = self.client.beta.assistants.create(
name=name,
description=description,
model=model,
instructions=instructions,
tools=self._convert_tools(tools),
)
def edit_assistant(
self,
name,
description=None,
model="gpt-4-vision-preview",
instructions=None,
tools=[],
):
"""
Edits the existing assistant with new parameters.
"""
self.assistant = self.client.beta.assistants.update(
assistant_id=self.assistant.id,
name=name,
description=description,
model=model,
instructions=instructions,
tools=self._convert_tools(tools),
)
def list_assistants(self):
"""
Retrieves a list of all assistants.
"""
return self.client.beta.assistants.list()
def get_thread(self, thread_id):
"""
Retrieves a specific thread by its ID.
"""
return self.client.beta.threads.retrieve(thread_id=thread_id)
def create_thread(self):
"""
Creates a new thread and stores it in the instance variable.
"""
self.thread = self.client.beta.threads.create()
def add_message_to_thread(self, message, role="user", files=[]):
"""
Adds a message to the current thread.
"""
self.client.beta.threads.messages.create(
thread_id=self.thread.id,
role=role,
content=message,
file_ids=files,
)
def send_message(self):
"""
Sends a message via the assistant in the current thread.
"""
self.run = self.client.beta.threads.runs.create(
thread_id=self.thread.id,
assistant_id=self.assistant.id,
)
def get_messages(self):
"""
Retrieves all messages from the current thread.
"""
return self.client.beta.threads.messages.list(thread_id=self.thread.id)
def check_run_status(self):
"""
Checks and waits for the run status to complete, with a spinner for user feedback.
"""
run = self.client.beta.threads.runs.retrieve(
thread_id=self.thread.id,
run_id=self.run.id,
)
spinner = Halo(text="Thinking...", spinner="dots")
spinner.start()
        counter = 0
        # Re-fetch the run status every 10th iteration (roughly every 50 seconds,
        # given the 5-second sleep below).
        while run.status in ["in_progress", "queued"]:
if counter % 10 == 0:
run = self.client.beta.threads.runs.retrieve(
thread_id=self.thread.id,
run_id=self.run.id,
)
time.sleep(5)
counter += 1
if run.status == "completed":
spinner.succeed("Done")
else:
spinner.fail("Error")
raise Exception(f"Run failed: {run}")
| [] |
2024-01-10 | iboraham/assistant-gpt-cli | assistant~api_validation.py | import time
import openai
from halo import Halo
from openai import OpenAI
from .ui_utils import logger
def check_api_key(api_key):
"""
Validates the provided OpenAI API key.
This function attempts to list the models using the given API key to check its validity.
It uses a spinner to indicate progress and logs any authentication errors encountered.
Args:
api_key (str): The API key to be validated.
Returns:
bool: True if the API key is valid, False otherwise.
"""
spinner = Halo(text="Checking API key", spinner="dots")
spinner.start()
client = OpenAI(api_key=api_key)
try:
# Attempt to list models to verify the API key.
client.models.list()
except openai.AuthenticationError as e:
spinner.fail("Invalid API key!")
logger.error(e) # Log the error for debugging purposes.
return False
else:
spinner.succeed("API key is valid 🎉")
time.sleep(1) # Short pause for user readability.
return True
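# A minimal usage sketch with a hypothetical key; callers typically gate further
# setup on the boolean returned by check_api_key.
if __name__ == "__main__":
    if check_api_key("sk-..."):
        print("Key accepted, continuing setup.")
    else:
        print("Key rejected, asking the user to re-enter it.")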
| [] |
2024-01-10 | AITOOLS-2023/Chatbot | 01_gtts_chatbot.py | from telegram.ext import Updater, MessageHandler, Filters
import telegram
import openai
from moviepy.editor import AudioFileClip
from gtts import gTTS
openai.api_key = "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"
TELEGRAM_API_TOKEN = "6505242412:AAGbV9jEQuDWom491Oryp4-2ubv9Cn_vB9A"
messages = [{"role": "system", "content": "You are a helpful assistant."}]
def text_message(update, context):
update.message.reply_text(
"I've received a text message! Please give me a second to respond :)")
messages.append({"role": "user", "content": update.message.text})
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=messages
)
response_text = response["choices"][0]["message"]["content"]
tts = gTTS(text=response_text, lang='en')
tts.save('response_gtts.mp3')
context.bot.send_voice(chat_id=update.message.chat.id,
voice=open('response_gtts.mp3', 'rb'))
update.message.reply_text(
text=f"*[Bot]:* {response_text}", parse_mode=telegram.ParseMode.MARKDOWN)
messages.append({"role": "assistant", "content": response_text})
def voice_message(update, context):
update.message.reply_text(
"I've received a voice message! Please give me a second to respond :)")
    # Download the Telegram voice note and convert it from OGG to MP3 for transcription.
    voice_file = context.bot.getFile(update.message.voice.file_id)
    voice_file.download("voice_message.ogg")
    audio_clip = AudioFileClip("voice_message.ogg")
    audio_clip.write_audiofile("voice_message.mp3")
    # Transcribe the recording with OpenAI's Whisper API.
    audio_file = open("voice_message.mp3", "rb")
    transcript = openai.Audio.transcribe("whisper-1", audio_file).text
update.message.reply_text(
text=f"*[You]:* _{transcript}_", parse_mode=telegram.ParseMode.MARKDOWN)
messages.append({"role": "user", "content": transcript})
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=messages
)
response_text = response["choices"][0]["message"]["content"]
tts = gTTS(text=response_text, lang='hi')
# Save the audio to a file
tts.save('response_gtts.mp3')
context.bot.send_voice(chat_id=update.message.chat.id,
voice=open('response_gtts.mp3', 'rb'))
update.message.reply_text(
text=f"*[Bot]:* {response_text}", parse_mode=telegram.ParseMode.MARKDOWN)
messages.append({"role": "assistant", "content": response_text})
updater = Updater(TELEGRAM_API_TOKEN, use_context=True)
dispatcher = updater.dispatcher
dispatcher.add_handler(MessageHandler(
Filters.text & (~Filters.command), text_message))
dispatcher.add_handler(MessageHandler(Filters.voice, voice_message))
updater.start_polling()
updater.idle()
| [
"You are a helpful assistant."
] |
2024-01-10 | nealchandra/evals | evals~elsuite~modelgraded~classify.py | """
Generic eval that uses a prompt + classification.
"""
import itertools
import logging
import string
from collections import Counter
from random import Random
from typing import Callable, Iterable, Optional
import openai
import evals
import evals.record
from evals.base import ModelSpec
from evals.elsuite.utils import (
PromptFn,
format_necessary,
load_modelgraded_specs,
scrub_formatting_from_prompt,
)
INVALID_STR = "__invalid__"
CHOICE_KEY = "choice"
MATCH_FNS = {
"include": lambda x, y: float(x in y),
"exact": lambda x, y: float(x == y),
"endswith": lambda x, y: x.endswith(y),
"starts_or_endswith": lambda x, y: x.startswith(y) or x.endswith(y),
}
ANSWER_PROMPTS = {
# e.g. "Yes"
"classify": "Answer the question by printing only a single choice from {choices} (without quotes or punctuation) corresponding to the correct answer with no other text.".strip(),
# e.g. "Yes\n The reasons are: ..."
"classify_cot": "First, answer by printing a single choice from {choices} (without quotes or punctuation) corresponding to the correct answer. Then, from the next line, explain your reasonings step by step.".strip(),
# e.g. "Let's think step by step. ...\nYes"
"cot_classify": """
First, write out in a step by step manner your reasoning to be sure that your conclusion is correct. Avoid simply stating the correct answer at the outset. Then print only a single choice from {choices} (without quotes or punctuation) on its own line corresponding to the correct answer. At the end, repeat just the answer by itself on a new line.
Reasoning:""".strip(),
"cot_classify_jp": """
まず、一歩一歩あなたの推論を書き出してください。単に正しい答えを最初に述べることを避けてください。次に、{choices}(引用符や句読点なし)から正しい答えに対応する1つの選択肢を単独の行に書きだしてください。最後に、答えだけを新しい行に繰り返してください。
推論:
""".strip(),
}
def choice_to_str(choice_strings: Iterable[str]) -> str:
"""Return a string of choices, e.g. '"Yes" or "No" or "Maybe"'."""
return " or ".join(f'"{choice}"' for choice in choice_strings)
def get_choice(text: str, eval_type: str, match_fn: Callable, choice_strings: Iterable[str]) -> str:
"""Clean the answer string to a choice string to one of choice_strings. Return '__invalid__.' if no match."""
lines = text.strip().split("\n")
if eval_type.startswith("cot_classify"):
lines = lines[::-1] # reverse lines
for line in lines:
line = line.strip()
line = "".join(c for c in line if c not in string.punctuation)
if not line:
continue
for choice in choice_strings:
if match_fn(line, choice):
return choice
return INVALID_STR
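# An illustrative trace with hypothetical input: with eval_type "cot_classify" and
# choice_strings ("Yes", "No"), the text "Let's think step by step. ...\nYes" is
# split into lines, scanned in reverse, stripped of punctuation, and the first
# line matching a choice is "Yes", so get_choice returns "Yes".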
def expand_args_dict(args_dict):
"""Expand a dict of dicts, with namings.
args_dict = {
"a": {"a1": 1, "a2": 2},
"b": {"b1": 3, "b2": 4},
}
expand_args_dict(args_dict) = {
"a=a1:b=b1": {"a": ("a1", 1), "b": ("b1", 3)},
"a=a1:b=b2": {"a": ("a1", 1), "b": ("b2", 4)},
...}
"""
args_dict = {k: list(v.items()) for k, v in args_dict.items()}
keys = list(args_dict.keys())
values = list(args_dict.values())
new_values = [dict(zip(keys, v)) for v in itertools.product(*values)]
new_names = [":".join([f"{k}={v[0]}" for k, v in sorted(d.items())]) for d in new_values]
return dict(zip(new_names, new_values))
class ModelBasedClassify(evals.Eval):
invalid_request_during_completion = 0
invalid_request_during_evaluation = 0
def __init__(
self,
model_specs: evals.ModelSpecs,
samples_jsonl: str,
modelgraded_spec_file: str,
*args,
match_fn: str = "starts_or_endswith",
max_tokens: int = 1024,
multicomp_n: int = 1,
multicomp_temperature: float = 0.4,
samples_renamings: Optional[dict[str, str]] = None,
eval_type: Optional[str] = None,
metaeval: bool = False,
modelgraded_spec_args: Optional[dict[str, dict[str, str]]] = None,
**kwargs,
):
super().__init__(model_specs, *args, **kwargs)
self.max_tokens = max_tokens
self.samples_jsonl = samples_jsonl
self.match_fn = MATCH_FNS[match_fn]
self.metaeval = metaeval
self.multicomp_n = multicomp_n
self.multicomp_temperature = multicomp_temperature
self.samples_renamings = samples_renamings or {}
# check if multiple models are specified
if len(self.model_specs.completions) > 1:
assert self.multicomp_n == len(
self.model_specs.completions
), f"multicomp_n={self.multicomp_n} must be equal to the number of models={len(self.model_specs.completions)} if multiple models are specified."
if self.multicomp_n > 1 and self.multicomp_temperature == 0:
logging.warning(
f"multicomp_temperature={self.multicomp_temperature} is 0 for {self.multicomp_n} model outputs. Specify multiple completion models, e.g. 'oaieval gpt-3.5-turbo,gpt-4 ...'?"
)
if self.model_spec.name == "dummy-completion" or self.model_spec.name == "dummy-chat":
self.eval_modelspec = self.model_spec
else:
self.eval_modelspec = ModelSpec(
name="gpt-3.5-turbo", model="gpt-3.5-turbo", is_chat=True
)
"""import prompt and set attributes"""
modelgraded_specs = load_modelgraded_specs(modelgraded_spec_file)
# 'choice_strings' is a list of strings that specifies the possible choices
self.choice_strings = modelgraded_specs.pop("choice_strings")
if self.choice_strings == "from_n":
self.choice_strings = [str(i + 1) for i in range(self.multicomp_n)]
# make sure each choice doesn't contain any punctuation
for s in self.choice_strings:
assert not any(c in s for c in string.punctuation), f"{s} contains punctuation"
# (optional) 'choice_scores' is a dict that specifies the score for each choice string
# if 'choice_scores' is specified, 'scores/' are computed and added to metrics
self.choice_scores = modelgraded_specs.pop("choice_scores", {})
if self.choice_scores == "from_strings":
self.choice_scores = {c: float(c) for c in self.choice_strings}
assert all(
isinstance(v, (int, float)) for v in self.choice_scores.values()
), f"choice_scores must be a dict of floats, not {self.choice_scores}"
# (optional) 'eval_type' is a string that specifies the type of classification algorithm
# - "classify": only answer
# - "cot_classify": reason then answer (chain-of-thought) <- most recommended
# - "classify_cot": answer then reason (explanation)
# if 'eval_type' is not supplied from modelgraded_specs, then it must be supplied as an argument.
# - Importantly, it also assumes the answer prompt needs to be appended to the prompt.
self.eval_type = modelgraded_specs.pop("eval_type", None)
if not self.eval_type:
append_answer_prompt = True # append answer prompt to prompt
assert (
eval_type
), "eval_type must be specified, in modelgraded_spec_file or as an argument"
self.eval_type = eval_type
else:
assert (
not eval_type
), f"eval_type must be unspecified, if it is specified in modelgraded_spec_file"
append_answer_prompt = False
# 'prompt' is a string that specifies the model-graded evaluation
prompt = modelgraded_specs.pop("prompt")
assert isinstance(prompt, str), f"prompt must be a string, not {type(prompt)}"
if append_answer_prompt:
prompt += "\n\n" + ANSWER_PROMPTS[self.eval_type].format(
choices=choice_to_str(self.choice_strings)
)
self.prompt = [{"role": "user", "content": prompt}]
# 'input_outputs' is a dict that specifies the input and output keys in the sample
# output key is the model's raw response to input key. These are used for filling 'prompt' template.
self.input_outputs = modelgraded_specs.pop("input_outputs")
assert isinstance(
self.input_outputs, dict
), f"input_outputs must be a dict, not {type(self.input_outputs)}"
# (optional) 'args' is a dict of dicts that specifies additional arguments for 'prompt'
# each value in 'args_dict' essentially defines a separate modelgraded classification eval and has own metrics!
# if 'modelgraded_spec_args' is specified in eval YAML, it is merged with 'args_dict'
self.args_dict = modelgraded_specs.pop("args", {})
self.args_dict.update(modelgraded_spec_args or {})
if self.args_dict:
self.expanded_args_dict = expand_args_dict(self.args_dict)
else:
self.expanded_args_dict = {}
# (optional) 'completion_sample_templates'
# each key must be one of 'input_outputs'.values(). If 'multicomp_n' > 1, this template is filled 'multicomp_n' times
# and the concatenated result is passed to 'prompt' template.
self.completion_sample_templates = modelgraded_specs.pop("completion_sample_templates", {})
assert all(
k in self.input_outputs.values() for k in self.completion_sample_templates
), f"all {self.completion_sample_templates.keys()} must be in {self.input_outputs.values()}, "
if self.multicomp_n > 1:
assert (
self.completion_sample_templates
), "completion_sample_templates must be specified if multicomp_n > 1"
# since we accept optional args, we need to check that all args are used
assert not modelgraded_specs, f"Unused args: {modelgraded_specs}. Typo in YAML?"
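    # A hypothetical, minimal modelgraded spec illustrating the keys consumed above
    # (values are made up; real specs are the YAML files referenced by modelgraded_spec_file):
    #
    #   prompt: |-
    #     [Question]: {input}
    #     [Answer]: {completion}
    #     Is the answer above correct?
    #   choice_strings: ["Yes", "No"]
    #   choice_scores: {"Yes": 1.0, "No": 0.0}
    #   input_outputs:
    #     input: completion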
def eval_sample(self, test_sample: dict, rng: Random) -> None:
"""Evaluate a single sample.
Recorded metrics are always: one of the self.choice_strings, or "__invalid__".
"""
if self.samples_renamings:
test_sample = {self.samples_renamings.get(k, k): v for k, v in test_sample.items()}
if self.multicomp_n > 1:
test_sample["n"] = self.multicomp_n
completions = {}
if self.metaeval:
# assert outputs exist in the data
for v in self.input_outputs.values():
assert v in test_sample, f"Missing output '{v}' in sample {test_sample.keys()}"
completions[v] = test_sample[v]
# remove outputs from the data
test_sample = {
k: v for k, v in test_sample.items() if k not in list(self.input_outputs.values())
}
for k in self.input_outputs:
test_sample[k] = scrub_formatting_from_prompt(test_sample[k])
if not self.metaeval:
try:
for k, v in self.input_outputs.items():
if self.multicomp_n > 1 and v in self.completion_sample_templates:
completion = ""
completion_i_template = self.completion_sample_templates[v]
for i in range(self.multicomp_n):
if len(self.model_specs.completions) > 1:
# use a separate model for each completion
model_spec = self.model_specs.completions[i]
else:
# use the single model for all completions
model_spec = self.model_spec
get_input_completion = PromptFn(
test_sample[k],
model_spec=model_spec,
max_tokens=self.max_tokens,
temperature=self.multicomp_temperature,
)
completion_i, _ = get_input_completion()
completion += format_necessary(
completion_i_template,
i=i + 1,
output=completion_i,
n=self.multicomp_n,
)
else:
get_input_completion = PromptFn(
test_sample[k],
model_spec=self.model_spec,
max_tokens=self.max_tokens,
)
completion, _ = get_input_completion()
completions[v] = completion
except openai.error.InvalidRequestError:
self.invalid_request_during_completion += 1
return
try:
metrics = {}
evaluate = PromptFn(
self.prompt,
model_spec=self.eval_modelspec,
max_tokens=self.max_tokens,
)
eval_kwargs = dict(**completions, **test_sample)
if self.expanded_args_dict:
args_dict = self.expanded_args_dict
else:
args_dict = {CHOICE_KEY: {}}
for metric, args in args_dict.items():
args = {k: v[1] for k, v in args.items()}
evaluation, _ = evaluate(**args, **eval_kwargs)
choice = get_choice(evaluation, self.eval_type, self.match_fn, self.choice_strings)
if choice == INVALID_STR:
                    logging.warning(
f"Choices {self.choice_strings} not parsable for {self.eval_type}: {evaluation}"
)
metrics[metric] = choice
if self.metaeval:
assert (
metric in test_sample
), f"Missing label for metric '{metric}' in sample {test_sample.keys()}"
metrics[metric + "_metascore"] = choice == test_sample[metric]
except openai.error.InvalidRequestError:
self.invalid_request_during_evaluation += 1
return
evals.record.record_metrics(**metrics)
return choice
def run(self, recorder):
samples = evals.get_jsonl(self.samples_jsonl)
self.eval_all_samples(recorder, samples)
all_sample_metrics = recorder.get_metrics()
record_metrics = {}
if self.expanded_args_dict:
metrics = sorted(self.expanded_args_dict)
else:
metrics = [CHOICE_KEY]
for metric in metrics:
chosen = [m[metric] for m in all_sample_metrics if metric in m]
# if there is a best choice, compute the score
if self.choice_scores:
# assumption: each INVALID_STR contributes the lowest score
lowest_score = min(self.choice_scores.values())
scores = [
self.choice_scores[choice] if choice != INVALID_STR else lowest_score
for choice in chosen
]
record_metrics[f"score/{metric}"] = sum(scores) / len(all_sample_metrics)
# compute the counts and ratios
counts = dict(Counter(chosen))
missing_samples = len(all_sample_metrics) - len(chosen)
if missing_samples:
counts["__missing_samples__"] = missing_samples
record_metrics.update({f"counts/{metric}/{k}": v for k, v in counts.items()})
if self.metaeval:
metascores = [m[metric + "_metascore"] for m in all_sample_metrics if metric in m]
record_metrics[f"metascore/{metric}"] = sum(metascores) / len(all_sample_metrics)
record_metrics["invalid_request_during_completion"] = self.invalid_request_during_completion
record_metrics["invalid_request_during_evaluation"] = self.invalid_request_during_evaluation
return record_metrics
| [
"\n\n",
"False",
"True",
"{'classify': 'Answer the question by printing only a single choice from {choices} (without quotes or punctuation) corresponding to the correct answer with no other text.', 'classify_cot': 'First, answer by printing a single choice from {choices} (without quotes or punctuation) corresponding to the correct answer. Then, from the next line, explain your reasonings step by step.', 'cot_classify': 'First, write out in a step by step manner your reasoning to be sure that your conclusion is correct. Avoid simply stating the correct answer at the outset. Then print only a single choice from {choices} (without quotes or punctuation) on its own line corresponding to the correct answer. At the end, repeat just the answer by itself on a new line.\\n\\nReasoning:', 'cot_classify_jp': 'まず、一歩一歩あなたの推論を書き出してください。単に正しい答えを最初に述べることを避けてください。次に、{choices}(引用符や句読点なし)から正しい答えに対応する1つの選択肢を単独の行に書きだしてください。最後に、答えだけを新しい行に繰り返してください。\\n\\n推論:'}"
] |
2024-01-10 | nealchandra/evals | evals~cli~oaieval.py | """
This file defines the `oaieval` CLI for running evals.
"""
import argparse
import logging
import shlex
import sys
from functools import cached_property
from typing import Any, Mapping, Optional
import openai
import evals
import evals.api
import evals.base
import evals.record
from evals.base import ModelSpec, ModelSpecs
from evals.registry import registry
logger = logging.getLogger(__name__)
def _purple(str):
return f"\033[1;35m{str}\033[0m"
def parse_args(args=sys.argv[1:]) -> argparse.Namespace:
parser = argparse.ArgumentParser(description="Run evals through the API")
parser.add_argument("model", type=str, help="Name of a completion model.")
parser.add_argument("eval", type=str, help="Name of an eval. See registry.")
parser.add_argument("--embedding_model", type=str, default="")
parser.add_argument("--ranking_model", type=str, default="")
parser.add_argument("--extra_eval_params", type=str, default="")
parser.add_argument("--max_samples", type=int, default=None)
parser.add_argument("--cache", action=argparse.BooleanOptionalAction, default=True)
parser.add_argument("--visible", action=argparse.BooleanOptionalAction, default=None)
parser.add_argument("--seed", type=int, default=20220722)
parser.add_argument("--user", type=str, default="")
parser.add_argument("--record_path", type=str, default=None)
parser.add_argument(
"--log_to_file", type=str, default=None, help="Log to a file instead of stdout"
)
parser.add_argument("--debug", action=argparse.BooleanOptionalAction, default=False)
parser.add_argument("--local-run", action=argparse.BooleanOptionalAction, default=True)
parser.add_argument("--dry-run", action=argparse.BooleanOptionalAction, default=False)
parser.add_argument("--dry-run-logging", action=argparse.BooleanOptionalAction, default=True)
return parser.parse_args(args)
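# Hypothetical example invocations (the eval names below are placeholders; the two
# positional arguments are the completion model and a registered eval name):
#   oaieval gpt-3.5-turbo test-eval --max_samples 10
#   oaieval gpt-3.5-turbo,gpt-4 some-modelgraded-eval --record_path /tmp/out.jsonl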
def n_ctx_from_model_name(model_name: str) -> Optional[int]:
"""Returns n_ctx for a given API model name. Model list last updated 2023-03-14."""
# note that for most models, the max tokens is n_ctx + 1
DICT_OF_N_CTX_BY_MODEL_NAME_PREFIX: dict[str, int] = {
"dummy-": 2048,
"gpt-3.5-turbo-": 4096,
"gpt-4-": 8192,
"gpt-4-32k-": 32768,
}
DICT_OF_N_CTX_BY_MODEL_NAME: dict[str, int] = {
"ada": 2048,
"text-ada-001": 2048,
"babbage": 2048,
"text-babbage-001": 2048,
"curie": 2048,
"text-curie-001": 2048,
"davinci": 2048,
"text-davinci-001": 2048,
"code-davinci-002": 8000,
"text-davinci-002": 4096,
"text-davinci-003": 4096,
"gpt-3.5-turbo": 4096,
"gpt-3.5-turbo-0301": 4096,
"gpt-4": 8192,
"gpt-4-0314": 8192,
"gpt-4-32k": 32768,
"gpt-4-32k-0314": 32768,
}
# first, look for a prefix match
for model_prefix, n_ctx in DICT_OF_N_CTX_BY_MODEL_NAME_PREFIX.items():
if model_name.startswith(model_prefix):
return n_ctx
# otherwise, look for an exact match and return None if not found
return DICT_OF_N_CTX_BY_MODEL_NAME.get(model_name, None)
class ModelResolver:
# This is a temporary method to identify which models are chat models.
# Eventually, the OpenAI API should expose this information directly.
CHAT_MODELS = {
"gpt-3.5-turbo",
"gpt-3.5-turbo-0301",
"gpt-4",
"gpt-4-0314",
"gpt-4-32k",
"gpt-4-32k-0314",
"dummy-chat",
}
DUMMY_MODELS = {
"dummy-chat",
"dummy-completion",
}
def resolve(self, name: str) -> ModelSpec:
if name in self.DUMMY_MODELS:
result = ModelSpec(name=name, model=name, is_chat=(name in self.CHAT_MODELS))
return result
if name in self.api_model_ids:
result = ModelSpec(
name=name,
model=name,
is_chat=(name in self.CHAT_MODELS),
n_ctx=n_ctx_from_model_name(name),
)
return result
raise ValueError(f"Couldn't find model: {name}")
@cached_property
def api_model_ids(self):
return [m["id"] for m in openai.Model.list()["data"]]
def run(args):
if args.debug:
logging.getLogger().setLevel(logging.DEBUG)
visible = args.visible if args.visible is not None else (args.max_samples is None)
if args.max_samples is not None:
evals.eval.set_max_samples(args.max_samples)
eval_spec = registry.get_eval(args.eval)
assert (
eval_spec is not None
), f"Eval {args.eval} not found. Available: {list(sorted(registry._evals.keys()))}"
model_resolver = ModelResolver()
def get_model(name: str) -> ModelSpec:
return model_resolver.resolve(name)
completion_model_specs = [get_model(model) for model in args.model.split(",")]
model_specs = ModelSpecs(
completions_=completion_model_specs,
embedding_=get_model(args.embedding_model) if args.embedding_model else None,
ranking_=get_model(args.ranking_model) if args.ranking_model else None,
)
run_config = {
"model_specs": model_specs,
"eval_spec": eval_spec,
"seed": args.seed,
"max_samples": args.max_samples,
"command": " ".join(map(shlex.quote, sys.argv)),
"initial_settings": {
"visible": visible,
},
}
model_name = model_specs.completions_[0].name if len(model_specs.completions_) > 0 else "n/a"
eval_name = eval_spec.key
run_spec = evals.base.RunSpec(
model_name=model_name,
model_names=model_specs.names,
eval_name=eval_name,
base_eval=eval_name.split(".")[0],
split=eval_name.split(".")[1],
run_config=run_config,
created_by=args.user,
)
if args.record_path is None:
record_path = f"/tmp/evallogs/{run_spec.run_id}_{args.model}_{args.eval}.jsonl"
else:
record_path = args.record_path
if args.dry_run:
recorder = evals.record.DummyRecorder(run_spec=run_spec, log=args.dry_run_logging)
elif args.local_run:
recorder = evals.record.LocalRecorder(record_path, run_spec=run_spec)
else:
recorder = evals.record.Recorder(record_path, run_spec=run_spec)
api_extra_options = {}
if not args.cache:
api_extra_options["cache_level"] = 0
run_url = f"{run_spec.run_id}"
logger.info(_purple(f"Run started: {run_url}"))
def parse_extra_eval_params(param_str: Optional[str]) -> Mapping[str, Any]:
"""Parse a string of the form "key1=value1,key2=value2" into a dict."""
if not param_str:
return {}
def to_number(x):
try:
return int(x)
except:
pass
try:
return float(x)
except:
pass
return x
str_dict = dict(kv.split("=") for kv in param_str.split(","))
return {k: to_number(v) for k, v in str_dict.items()}
extra_eval_params = parse_extra_eval_params(args.extra_eval_params)
eval_class = registry.get_class(eval_spec)
eval = eval_class(model_specs=model_specs, seed=args.seed, name=eval_name, **extra_eval_params)
result = eval.run(recorder)
recorder.record_final_report(result)
if not (args.dry_run or args.local_run):
logger.info(_purple(f"Run completed: {run_url}"))
logger.info("Final report:")
for key, value in result.items():
logger.info(f"{key}: {value}")
return run_spec.run_id
def main():
args = parse_args()
logging.basicConfig(
format="[%(asctime)s] [%(filename)s:%(lineno)d] %(message)s",
level=logging.INFO,
filename=args.log_to_file if args.log_to_file else None,
)
logging.getLogger("openai").setLevel(logging.WARN)
if hasattr(openai.error, "set_display_cause"):
openai.error.set_display_cause()
run(args)
if __name__ == "__main__":
main()
| [] |
2024-01-10 | RKP64/FinGLM | code~%E9%A6%92%E5%A4%B4%E7%A7%91%E6%8A%80~mantoutech~preprocess.py | import os
import json
import shutil
# import pdfplumber
# import camelot
from multiprocessing import Pool
from loguru import logger
# from langchain.document_loaders import UnstructuredPDFLoader
# from langchain.document_loaders import PDFPlumberLoader
# from langchain.document_loaders import TextLoader
# from langchain.text_splitter import CharacterTextSplitter, RecursiveCharacterTextSplitter
# from langchain.schema import Document
# from langchain.embeddings.huggingface import HuggingFaceEmbeddings
# from langchain.vectorstores import FAISS
from config import cfg
from file import load_pdf_info
# from chinese_text_splitter import ChineseTextSplitter
# from pdf2txt import PDFProcessor
from pdf_util import PdfExtractor
from financial_state import (extract_basic_info, extract_employee_info,
extract_cbs_info, extract_cscf_info, extract_cis_info, merge_info)
def setup_xpdf():
os.chdir(cfg.XPDF_PATH)
cmd = 'chmod +x pdftotext'
os.system(cmd)
def extract_pure_content(idx, key, pdf_path):
logger.info('Extract text for {}:{}'.format(idx, key))
save_dir = os.path.join(cfg.DATA_PATH, cfg.PDF_TEXT_DIR)
key_dir = os.path.join(save_dir, key)
if not os.path.exists(key_dir):
os.mkdir(key_dir)
save_path = os.path.join(key_dir, 'pure_content.txt')
if os.path.exists(save_path):
os.remove(save_path)
PdfExtractor(pdf_path).extract_pure_content_and_save(save_path)
# def extract_text(idx, key, pdf_path):
# print(idx, key, pdf_path)
# save_dir = os.path.join(cfg.DATA_PATH, __pdf_text_dir__)
# key_dir = os.path.join(save_dir, key)
# if not os.path.exists(key_dir):
# os.mkdir(key_dir)
# save_path = os.path.join(key_dir, 'docs.txt')
# # if os.path.exists(save_path):
# # return
# # else:
# # os.chdir(__xpdf_path__)
# # cmd = './pdftotext -lineprinter "{}" "{}"'.format(pdf_path, save_path)
# # print(cmd)
# # os.system(cmd)
# try:
# processor = PDFProcessor(pdf_path)
# processor.process_pdf()
# processor.save_all_text(save_path)
# # PdfExtractor(pdf_path).extract_and_save(save_path)
# except Exception as e:
# print(e, pdf_path)
def extract_pdf_text(extract_func=extract_pure_content):
setup_xpdf()
save_dir = os.path.join(cfg.DATA_PATH, cfg.PDF_TEXT_DIR)
if not os.path.exists(save_dir):
os.mkdir(save_dir)
pdf_info = load_pdf_info()
# for i, (k, v) in enumerate(pdf_info.items()):
# extract_func(i, k, v['pdf_path'])
with Pool(processes=cfg.NUM_PROCESSES) as pool:
results = pool.starmap(extract_func, [(i, k, v['pdf_path']) for i, (k, v) in enumerate(pdf_info.items())])
def extract_pdf_tables():
pdf_info = load_pdf_info()
pdf_keys = list(pdf_info.keys())
# basic_info
with Pool(processes=cfg.NUM_PROCESSES) as pool:
results = pool.map(extract_basic_info, pdf_keys)
merge_info('basic_info')
# # employee_info
with Pool(processes=cfg.NUM_PROCESSES) as pool:
results = pool.map(extract_employee_info, pdf_keys)
merge_info('employee_info')
# cbs_info
with Pool(processes=cfg.NUM_PROCESSES) as pool:
results = pool.map(extract_cbs_info, pdf_keys)
merge_info('cbs_info')
# cscf_info
with Pool(processes=cfg.NUM_PROCESSES) as pool:
results = pool.map(extract_cscf_info, pdf_keys)
merge_info('cscf_info')
# cis_info
with Pool(processes=cfg.NUM_PROCESSES) as pool:
results = pool.map(extract_cis_info, pdf_keys)
merge_info('cis_info')
# def generate_embedding_vector(key, embedding):
# text_path = os.path.join(cfg.DATA_PATH, __pdf_text_dir__, key, 'docs.txt')
# loader = TextLoader(text_path, encoding='utf-8')
# docs = loader.load_and_split(text_splitter=RecursiveCharacterTextSplitter(
# separators=['\n'], keep_separator=False,
# chunk_size=1024, chunk_overlap=0,
# length_function=len, add_start_index=True))
# # for doc in docs:
# # print(len(doc.page_content))
# # print(doc.page_content)
# # print(doc.metadata)
# # print('*'*100)
# # exit(0)
# doc_vecs = FAISS.from_documents(docs, embedding)
# doc_vecs.save_local(os.path.join(cfg.DATA_PATH, __pdf_text_dir__, key, 'doc_vecs'))
# def generate_embedding_all():
# os.environ['CUDA_VISIBLE_DEVICES'] = '3'
# # embeddings = None
# connection_error = True
# while connection_error:
# try:
# embeddings = HuggingFaceEmbeddings(model_name='GanymedeNil/text2vec-large-chinese')
# connection_error = False
# except Exception as e:
# print(e)
# continue
# with open(os.path.join(cfg.DATA_PATH, 'pdf_info.json')) as f:
# pdf_info = json.load(f)
# for k, v in pdf_info.items():
# print(k)
# generate_embedding_vector(k, embeddings)
if __name__ == '__main__':
import os
import time
# import ghostscript
os.environ['PATH'] = r'C:\Program Files\gs\gs10.01.2\bin;' + os.environ['PATH']
# import ctypes
# from ctypes.util import find_library
# lib = find_library("".join(("gsdll", str(ctypes.sizeof(ctypes.c_voidp) * 8), ".dll")))
# print(lib)
# import camelot
# generate_embedding_all()
# extract_text_all(extract_func=extract_pure_content)
# extract_pure_content(0, '2020-03-25__南京钢铁股份有限公司__600282__南钢股份__2019年__年度报告.pdf',
# '/raidnvme/czc/MODELSCOPE_CACHE_HOME/modelscope/hub/datasets/modelscope/chatglm_llm_fintech_raw_dataset/master/data_files/1106979bbfe796043d45ea0f4831c916802713a7b08a580e98421d91d8ba0eb3')
pdf_path = r'C:\Users\CHENZHAOCAI\Downloads\test.pdf'
out_path = r'C:\Users\CHENZHAOCAI\Downloads\test.txt'
# pdf_path = '/raidnvme/czc/MODELSCOPE_CACHE_HOME/modelscope/hub/datasets/modelscope/chatglm_llm_fintech_raw_dataset/master/data_files/011af0d314a605ab3cff699f48af52248d2d9fabe417b811321d11107fa49c97'
# start = time.time()
PdfExtractor(pdf_path).extract_table_of_pages([103])
# PdfExtractor(pdf_path).extract_pure_content_and_save(out_path, True)
# end = time.time()
# print(end - start)
# from file import load_pdf_info, load_pdf_pure_text
# pdf_info = load_pdf_info()
# for k, v in pdf_info.items():
# # print(k, v['pdf_path'])
# text_lines = load_pdf_pure_text(k)
# if len(text_lines) == 0:
    #         extract_pure_content(0, k, v['pdf_path'])
 | [] |