code (stringlengths 141–79.4k) | apis (sequencelengths 1–23) | extract_api (stringlengths 126–73.2k)
---|---|---|
import os
try:
from genai.credentials import Credentials
from genai.schemas import GenerateParams
from genai.extensions.langchain import LangChainInterface
from langchain import PromptTemplate
from langchain.chains import LLMChain, SimpleSequentialChain
except ImportError:
raise ImportError("Could not import the IBM Generative AI LangChain extension: please install ibm-generative-ai[langchain].")
"""
pip install "ibm-generative-ai[langchain]"
"""
# make sure you have a .env file under the genai root with GENAI_KEY and GENAI_API set
api_key = os.getenv("GENAI_KEY", None)
api_url = os.getenv("GENAI_API", None)
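# (Added illustration, not in the original script) Fail fast if the credentials are
# missing instead of passing None values to Credentials below. GENAI_KEY and GENAI_API
# are the same environment variables read above from the .env file.
if api_key is None or api_url is None:
    raise ValueError("GENAI_KEY and GENAI_API must be set, e.g. in the .env file, before creating Credentials.")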
# create creds for watsonx.ai models
creds = Credentials(api_key, api_endpoint=api_url)
params = GenerateParams(decoding_method="greedy", min_new_tokens=3, max_new_tokens=300)
#params = GenerateParams(decoding_method="greedy")
llmp = LangChainInterface(model="ibm/granite-13b-sft", params=params, credentials=creds)
# This is a simple example of a zero shot prompt
print(f"******************************************************")
print(f" ")
print(f"This is a simple zero shot prompt")
print(f" ")
print(f"******************************************************")
print(f" ")
print(f" ")
print(llmp("Explain Mortgage backed securities in one sentence"))
"""
Implement a PromptTemplate
"""
print(f" ")
print(f" ")
print(f"******************************************************")
print(f" ")
print(f"This is a sample using a Prompt Template")
print(f" ")
print(f"******************************************************")
print(f" ")
print(f" ")
template = """
You are a mortgage broker with expertise in financial products.
Explain the concept of {concept} in a paragraph.
"""
promptp = PromptTemplate(input_variables=["concept"],template=template,)
#print(promptp)
conceptp = "closing costs"
print(llmp(promptp.format(concept=conceptp)))
"""
Implement Chains: In this section I am taking the initial prompt response
and then asking the model to update the response.
"""
print(f"******************************************************")
print(f" ")
print(f" ")
print(f"This is an example of chaining 2 prompts together to have the second prompt work in the repsonse of the first prompt. We will be using the first Prompt template from above to feed the second Prompt Template, using a Simple Sequence Chain.")
print(f" ")
print(f" ")
print(f"******************************************************")
print(f" ")
print(f" ")
#print(promptp)
chain = LLMChain(llm=llmp, prompt=promptp)
#print(chain.run(conceptp))
second_prompt = PromptTemplate(
input_variables=["ml_concept"],
template="Turn this financial description {ml_concept} and explain in techncial terms")
chain_two = LLMChain(llm=llmp, prompt=second_prompt)
overall_chain = SimpleSequentialChain(chains=[chain, chain_two], verbose=True)
explanation = overall_chain.run(conceptp)
print(explanation)
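# Optional variation (a sketch, not part of the original script): SimpleSequentialChain
# returns only the final string. If both intermediate outputs are needed, a SequentialChain
# with named output keys can be used instead; the output key names below are assumptions.
from langchain.chains import SequentialChain
chain_one_named = LLMChain(llm=llmp, prompt=promptp, output_key="ml_concept")
chain_two_named = LLMChain(llm=llmp, prompt=second_prompt, output_key="technical_explanation")
named_chain = SequentialChain(
    chains=[chain_one_named, chain_two_named],
    input_variables=["concept"],
    output_variables=["ml_concept", "technical_explanation"],
    verbose=True,
)
# Returns a dict holding both the intermediate description and the technical explanation
print(named_chain({"concept": conceptp}))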
| [
"langchain.chains.LLMChain",
"langchain.chains.SimpleSequentialChain",
"langchain.PromptTemplate"
] | [((520, 548), 'os.getenv', 'os.getenv', (['"""GENAI_KEY"""', 'None'], {}), "('GENAI_KEY', None)\n", (529, 548), False, 'import os\n'), ((559, 587), 'os.getenv', 'os.getenv', (['"""GENAI_API"""', 'None'], {}), "('GENAI_API', None)\n", (568, 587), False, 'import os\n'), ((634, 676), 'genai.credentials.Credentials', 'Credentials', (['api_key'], {'api_endpoint': 'api_url'}), '(api_key, api_endpoint=api_url)\n', (645, 676), False, 'from genai.credentials import Credentials\n'), ((686, 764), 'genai.schemas.GenerateParams', 'GenerateParams', ([], {'decoding_method': '"""greedy"""', 'min_new_tokens': '(3)', 'max_new_tokens': '(300)'}), "(decoding_method='greedy', min_new_tokens=3, max_new_tokens=300)\n", (700, 764), False, 'from genai.schemas import GenerateParams\n'), ((823, 909), 'genai.extensions.langchain.LangChainInterface', 'LangChainInterface', ([], {'model': '"""ibm/granite-13b-sft"""', 'params': 'params', 'credentials': 'creds'}), "(model='ibm/granite-13b-sft', params=params, credentials=\n creds)\n", (841, 909), False, 'from genai.extensions.langchain import LangChainInterface\n'), ((1698, 1760), 'langchain.PromptTemplate', 'PromptTemplate', ([], {'input_variables': "['concept']", 'template': 'template'}), "(input_variables=['concept'], template=template)\n", (1712, 1760), False, 'from langchain import PromptTemplate\n'), ((2476, 2510), 'langchain.chains.LLMChain', 'LLMChain', ([], {'llm': 'llmp', 'prompt': 'promptp'}), '(llm=llmp, prompt=promptp)\n', (2484, 2510), False, 'from langchain.chains import LLMChain, SimpleSequentialChain\n'), ((2556, 2700), 'langchain.PromptTemplate', 'PromptTemplate', ([], {'input_variables': "['ml_concept']", 'template': '"""Turn this financial description {ml_concept} and explain in techncial terms"""'}), "(input_variables=['ml_concept'], template=\n 'Turn this financial description {ml_concept} and explain in techncial terms'\n )\n", (2570, 2700), False, 'from langchain import PromptTemplate\n'), ((2713, 2753), 'langchain.chains.LLMChain', 'LLMChain', ([], {'llm': 'llmp', 'prompt': 'second_prompt'}), '(llm=llmp, prompt=second_prompt)\n', (2721, 2753), False, 'from langchain.chains import LLMChain, SimpleSequentialChain\n'), ((2770, 2832), 'langchain.chains.SimpleSequentialChain', 'SimpleSequentialChain', ([], {'chains': '[chain, chain_two]', 'verbose': '(True)'}), '(chains=[chain, chain_two], verbose=True)\n', (2791, 2832), False, 'from langchain.chains import LLMChain, SimpleSequentialChain\n')] |
#%% Import Flask and create an app object
import config
from dotenv import load_dotenv
load_dotenv()
import os
import json
import asyncio
import openai
import pprint as pp
import markdown
# openai.api_key = os.getenv("OPENAI_API_KEY")
# os.environ["OPENAI_API_KEY"] = os.getenv("OPENAI_API_KEY")
# Import Flask and create an app object
from flask import Flask, render_template, request, jsonify
app = Flask(__name__)
# Import asyncio and the async_playwright module from Playwright
import asyncio
from playwright.async_api import async_playwright
# Import LangChain and other modules
import langchain
from langchain.agents import AgentType
from langchain.chat_models import ChatOpenAI
from langchain.agents import initialize_agent, load_tools
from langchain.agents.agent_toolkits import PlayWrightBrowserToolkit
from langchain.tools.playwright.utils import (
create_async_playwright_browser,
create_sync_playwright_browser, # A synchronous browser is available, though it isn't compatible with jupyter.
)
# This import is required only for jupyter notebooks, since they have their own eventloop
# import nest_asyncio
# nest_asyncio.apply()
#%% Agent
def get_agent():
# LLM using ChatOpenAI
llm = ChatOpenAI(model_name="gpt-4", temperature=0) # Also works well with Anthropic models
# Get the tools from the toolkit
tools = load_tools(["serpapi"], llm=llm)
# Create an agent using ChatOpenAI and initialize it with tools and handle_parsing_errors=True
agent_chain = initialize_agent(
tools=tools,
llm=llm,
agent=AgentType.STRUCTURED_CHAT_ZERO_SHOT_REACT_DESCRIPTION,
verbose=True,
handle_parsing_errors=True
)
return agent_chain
# agent_chain = get_agent()
#%%
@app.route("/")
def home():
# Use some initial chat history (empty, in this case)
chat_history = []
return render_template("chat.html", chat_history=chat_history)
@app.route("/respond")
def respond():
""" Create another route for your chatbot logic
"""
print("\nrespond::start:")
# Get the user message and the chat history from the interface
message = request.args.get("message")
print(f" respond::message: {message}")
# Parse the chat history as a JSON object
chat_history = json.loads(request.args.get("chat_history", "[]"))
print(f" respond::chat_history: {chat_history}")
# Run your main coroutine that handles the chatbot logic
# response = asyncio.run(main(message, chat_history))
chat_history = main(message, chat_history)
response = jsonify({"chat_history": chat_history})
print("\nrespond::return:")
print(f" respond::chat_history: {chat_history}")
# print response to console as json
pp.pprint(response.json)
# Return the response as a JSON string
return response
def main(message, chat_history):
""" Define your main coroutine that handles the chatbot logic
"""
print("\nmain::start:")
print(f" main::message: {message}")
print(f" main::chat_history: {chat_history}")
# Run your agent chain with the user message as input and get the bot message as output
bot_message = "I'm a STICK!!!\n\n```python\nprint('Hello World')\n```"
# Convert bot_message to HTML
bot_message_html = markdown.markdown(bot_message)
# Fill in the bot reply on the latest chat history entry (the front end is expected
# to have appended [message, None] before calling /respond)
chat_history[-1][1] = bot_message_html
print("\nmain::return:")
print(f" main::bot_message: {bot_message}")
print(f" main::bot_message_html: {bot_message_html}")
print(f" main::chat_history: {chat_history}")
# Return the updated chat history as a string
return chat_history
# def main(message, chat_history):
# """ Define your main coroutine that handles the chatbot logic
# """
# print("\nmain::start:")
# print(f" main::message: {message}")
# print(f" main::chat_history: {chat_history}")
# # Run your agent chain with the user message as input and get the bot message as output
# # bot_message = "I'm a STICK!!!\n\n```python\nprint('Hello World')\n```" # agent_chain.run(input=message)
# bot_message = "I'm a STICK!!!<br><br><pre><code>print('Hello World')</code></pre>"
# print("\nmain::return:")
# print(f" main::bot_message: {bot_message}")
# # Append the user message and the bot message to the chat history
# chat_history[-1][1] = bot_message
# print(f" main::chat_history: {chat_history}")
# # Return the updated chat history as a string
# return chat_history
async def amain(message, chat_history):
""" Define your main coroutine that handles the chatbot logic
"""
# Create one event loop before creating the tools
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
# Use async with to create and close the async browser automatically
async with async_playwright() as playwright:
# Use await to launch a Chromium browser asynchronously
browser = await playwright.chromium.launch()
# Create a PlayWrightBrowserToolkit from the browser
browser_toolkit = PlayWrightBrowserToolkit.from_browser(async_browser=browser)
# Get the tools from the toolkit
tools = browser_toolkit.get_tools()
# Create an agent using ChatOpenAI and initialize it with tools and handle_parsing_errors=True
llm = ChatOpenAI(model_name="gpt-4", temperature=0) # Also works well with Anthropic models
agent_chain = initialize_agent(
tools=tools,
llm=llm,
agent=AgentType.STRUCTURED_CHAT_ZERO_SHOT_REACT_DESCRIPTION,
verbose=True,
handle_parsing_errors=True
)
# Run your agent chain with the user message as input and get the bot message as output
bot_message = await agent_chain.arun(input=message)
# Append the user message and the bot message to the chat history
chat_history.append((message, bot_message))
# Return the updated chat history as a string
return str(chat_history)
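# A minimal sketch (not wired up in the original) of how the async agent pipeline above
# could be called from the synchronous /respond route, mirroring the commented-out
# asyncio.run(...) call there. It assumes the default single-threaded Flask dev server,
# where no event loop is already running.
def respond_with_agent(message, chat_history):
    # amain() returns str(chat_history), so the caller must handle a string here.
    return asyncio.run(amain(message, chat_history))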
# Run your Flask app with app.run()
if __name__ == "__main__":
app.run(
host="localhost",
port=None,
debug=True,
load_dotenv=True,
)
| [
"langchain.agents.initialize_agent",
"langchain.agents.agent_toolkits.PlayWrightBrowserToolkit.from_browser",
"langchain.agents.load_tools",
"langchain.chat_models.ChatOpenAI"
] | [((87, 100), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (98, 100), False, 'from dotenv import load_dotenv\n'), ((403, 418), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (408, 418), False, 'from flask import Flask, render_template, request, jsonify\n'), ((1220, 1265), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'model_name': '"""gpt-4"""', 'temperature': '(0)'}), "(model_name='gpt-4', temperature=0)\n", (1230, 1265), False, 'from langchain.chat_models import ChatOpenAI\n'), ((1356, 1388), 'langchain.agents.load_tools', 'load_tools', (["['serpapi']"], {'llm': 'llm'}), "(['serpapi'], llm=llm)\n", (1366, 1388), False, 'from langchain.agents import initialize_agent, load_tools\n'), ((1506, 1656), 'langchain.agents.initialize_agent', 'initialize_agent', ([], {'tools': 'tools', 'llm': 'llm', 'agent': 'AgentType.STRUCTURED_CHAT_ZERO_SHOT_REACT_DESCRIPTION', 'verbose': '(True)', 'handle_parsing_errors': '(True)'}), '(tools=tools, llm=llm, agent=AgentType.\n STRUCTURED_CHAT_ZERO_SHOT_REACT_DESCRIPTION, verbose=True,\n handle_parsing_errors=True)\n', (1522, 1656), False, 'from langchain.agents import initialize_agent, load_tools\n'), ((1848, 1903), 'flask.render_template', 'render_template', (['"""chat.html"""'], {'chat_history': 'chat_history'}), "('chat.html', chat_history=chat_history)\n", (1863, 1903), False, 'from flask import Flask, render_template, request, jsonify\n'), ((2116, 2143), 'flask.request.args.get', 'request.args.get', (['"""message"""'], {}), "('message')\n", (2132, 2143), False, 'from flask import Flask, render_template, request, jsonify\n'), ((2539, 2578), 'flask.jsonify', 'jsonify', (["{'chat_history': chat_history}"], {}), "({'chat_history': chat_history})\n", (2546, 2578), False, 'from flask import Flask, render_template, request, jsonify\n'), ((2709, 2733), 'pprint.pprint', 'pp.pprint', (['response.json'], {}), '(response.json)\n', (2718, 2733), True, 'import pprint as pp\n'), ((3251, 3281), 'markdown.markdown', 'markdown.markdown', (['bot_message'], {}), '(bot_message)\n', (3268, 3281), False, 'import markdown\n'), ((4710, 4734), 'asyncio.new_event_loop', 'asyncio.new_event_loop', ([], {}), '()\n', (4732, 4734), False, 'import asyncio\n'), ((4739, 4767), 'asyncio.set_event_loop', 'asyncio.set_event_loop', (['loop'], {}), '(loop)\n', (4761, 4767), False, 'import asyncio\n'), ((2264, 2302), 'flask.request.args.get', 'request.args.get', (['"""chat_history"""', '"""[]"""'], {}), "('chat_history', '[]')\n", (2280, 2302), False, 'from flask import Flask, render_template, request, jsonify\n'), ((4857, 4875), 'playwright.async_api.async_playwright', 'async_playwright', ([], {}), '()\n', (4873, 4875), False, 'from playwright.async_api import async_playwright\n'), ((5095, 5155), 'langchain.agents.agent_toolkits.PlayWrightBrowserToolkit.from_browser', 'PlayWrightBrowserToolkit.from_browser', ([], {'async_browser': 'browser'}), '(async_browser=browser)\n', (5132, 5155), False, 'from langchain.agents.agent_toolkits import PlayWrightBrowserToolkit\n'), ((5359, 5404), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'model_name': '"""gpt-4"""', 'temperature': '(0)'}), "(model_name='gpt-4', temperature=0)\n", (5369, 5404), False, 'from langchain.chat_models import ChatOpenAI\n'), ((5468, 5618), 'langchain.agents.initialize_agent', 'initialize_agent', ([], {'tools': 'tools', 'llm': 'llm', 'agent': 'AgentType.STRUCTURED_CHAT_ZERO_SHOT_REACT_DESCRIPTION', 'verbose': '(True)', 'handle_parsing_errors': '(True)'}), '(tools=tools, llm=llm, 
agent=AgentType.\n STRUCTURED_CHAT_ZERO_SHOT_REACT_DESCRIPTION, verbose=True,\n handle_parsing_errors=True)\n', (5484, 5618), False, 'from langchain.agents import initialize_agent, load_tools\n')] |
import langchain_helper as lch
import streamlit as st
st.title("Pet Name Generator")
pet_type = st.sidebar.selectbox("What is your pet?", ("dog", "cat", "bird", "fish", "reptile"))
if pet_type:
names_count = st.sidebar.slider("How many names do you want to generate?", 1, 10, 1)
if pet_type and names_count and st.button("Generate"):
response = lch.generate_pet_name(pet_type, names_count)
st.text(response["text"])
st.text("Powered by OpenAI's GPT-3.5")
| [
"langchain_helper.generate_pet_name"
] | [((55, 85), 'streamlit.title', 'st.title', (['"""Pet Name Generator"""'], {}), "('Pet Name Generator')\n", (63, 85), True, 'import streamlit as st\n'), ((98, 186), 'streamlit.sidebar.selectbox', 'st.sidebar.selectbox', (['"""What is your pet?"""', "('dog', 'cat', 'bird', 'fish', 'reptile')"], {}), "('What is your pet?', ('dog', 'cat', 'bird', 'fish',\n 'reptile'))\n", (118, 186), True, 'import streamlit as st\n'), ((215, 285), 'streamlit.sidebar.slider', 'st.sidebar.slider', (['"""How many names do you want to generate?"""', '(1)', '(10)', '(1)'], {}), "('How many names do you want to generate?', 1, 10, 1)\n", (232, 285), True, 'import streamlit as st\n'), ((319, 340), 'streamlit.button', 'st.button', (['"""Generate"""'], {}), "('Generate')\n", (328, 340), True, 'import streamlit as st\n'), ((357, 401), 'langchain_helper.generate_pet_name', 'lch.generate_pet_name', (['pet_type', 'names_count'], {}), '(pet_type, names_count)\n', (378, 401), True, 'import langchain_helper as lch\n'), ((406, 431), 'streamlit.text', 'st.text', (["response['text']"], {}), "(response['text'])\n", (413, 431), True, 'import streamlit as st\n'), ((436, 474), 'streamlit.text', 'st.text', (['"""Powered by OpenAI\'s GPT-3.5"""'], {}), '("Powered by OpenAI\'s GPT-3.5")\n', (443, 474), True, 'import streamlit as st\n')] |
import torch
from langchain.llms.base import LLM
from langchain.embeddings.huggingface import HuggingFaceEmbeddings
from llama_index import LangchainEmbedding
from llama_index import SimpleDirectoryReader, LLMPredictor, PromptHelper, GPTSimpleVectorIndex
from peft import PeftModel
from transformers import LlamaTokenizer, LlamaForCausalLM, GenerationConfig
hf_model_path = "models/llama-7b"
alpaca_model_path = "models/lora-alpaca"
tokenizer = LlamaTokenizer.from_pretrained(hf_model_path)
model = LlamaForCausalLM.from_pretrained(
hf_model_path,
load_in_8bit=True, # Disabling this could resolve some errors
device_map="auto",
)
model = PeftModel.from_pretrained(model, alpaca_model_path)
device = torch.device("cuda") if torch.cuda.is_available() else "cpu"
max_length = 1500 #2048
max_new_tokens = 48
class LLaMALLM(LLM):
def _call(self, prompt, stop=None):
prompt += "### Response:"
inputs = tokenizer(prompt, return_tensors="pt")
input_ids = inputs["input_ids"].cuda()
generation_config = GenerationConfig(
temperature=0.6,
top_p=0.95,
repetition_penalty=1.15,
)
with torch.no_grad():
generation_output = model.generate(
input_ids=input_ids,
generation_config=generation_config,
return_dict_in_generate=True,
output_scores=True,
max_new_tokens=128,
)
response = ""
for s in generation_output.sequences:
response += tokenizer.decode(s)
response = response[len(prompt):]
print("Model Response:", response)
return response
@property
def _identifying_params(self):
return {"name_of_model": "alpaca"}
@property
def _llm_type(self):
return "custom"
max_input_size = max_length
num_output = max_new_tokens
max_chunk_overlap = 20
prompt_helper = PromptHelper(max_input_size, num_output, max_chunk_overlap)
embed_model = LangchainEmbedding(HuggingFaceEmbeddings())
documents = SimpleDirectoryReader('data').load_data()
llm_predictor = LLMPredictor(llm=LLaMALLM())
index = GPTSimpleVectorIndex(documents, llm_predictor=llm_predictor, embed_model=embed_model, prompt_helper=prompt_helper)
index.save_to_disk('index.json')
new_index = GPTSimpleVectorIndex.load_from_disk('index.json', embed_model=embed_model, llm_predictor=llm_predictor, prompt_helper=prompt_helper)
response = new_index.query("What did Gatsby do before he met Daisy?")
print(response.response)
response = new_index.query("What did the narrator do after getting back to Chicago?")
print(response.response)
| [
"langchain.embeddings.huggingface.HuggingFaceEmbeddings"
] | [((460, 505), 'transformers.LlamaTokenizer.from_pretrained', 'LlamaTokenizer.from_pretrained', (['hf_model_path'], {}), '(hf_model_path)\n', (490, 505), False, 'from transformers import LlamaTokenizer, LlamaForCausalLM, GenerationConfig\n'), ((517, 606), 'transformers.LlamaForCausalLM.from_pretrained', 'LlamaForCausalLM.from_pretrained', (['hf_model_path'], {'load_in_8bit': '(True)', 'device_map': '"""auto"""'}), "(hf_model_path, load_in_8bit=True,\n device_map='auto')\n", (549, 606), False, 'from transformers import LlamaTokenizer, LlamaForCausalLM, GenerationConfig\n'), ((667, 718), 'peft.PeftModel.from_pretrained', 'PeftModel.from_pretrained', (['model', 'alpaca_model_path'], {}), '(model, alpaca_model_path)\n', (692, 718), False, 'from peft import PeftModel\n'), ((1997, 2056), 'llama_index.PromptHelper', 'PromptHelper', (['max_input_size', 'num_output', 'max_chunk_overlap'], {}), '(max_input_size, num_output, max_chunk_overlap)\n', (2009, 2056), False, 'from llama_index import SimpleDirectoryReader, LLMPredictor, PromptHelper, GPTSimpleVectorIndex\n'), ((2226, 2345), 'llama_index.GPTSimpleVectorIndex', 'GPTSimpleVectorIndex', (['documents'], {'llm_predictor': 'llm_predictor', 'embed_model': 'embed_model', 'prompt_helper': 'prompt_helper'}), '(documents, llm_predictor=llm_predictor, embed_model=\n embed_model, prompt_helper=prompt_helper)\n', (2246, 2345), False, 'from llama_index import SimpleDirectoryReader, LLMPredictor, PromptHelper, GPTSimpleVectorIndex\n'), ((2390, 2526), 'llama_index.GPTSimpleVectorIndex.load_from_disk', 'GPTSimpleVectorIndex.load_from_disk', (['"""index.json"""'], {'embed_model': 'embed_model', 'llm_predictor': 'llm_predictor', 'prompt_helper': 'prompt_helper'}), "('index.json', embed_model=embed_model,\n llm_predictor=llm_predictor, prompt_helper=prompt_helper)\n", (2425, 2526), False, 'from llama_index import SimpleDirectoryReader, LLMPredictor, PromptHelper, GPTSimpleVectorIndex\n'), ((757, 782), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (780, 782), False, 'import torch\n'), ((733, 753), 'torch.device', 'torch.device', (['"""cuda"""'], {}), "('cuda')\n", (745, 753), False, 'import torch\n'), ((2091, 2114), 'langchain.embeddings.huggingface.HuggingFaceEmbeddings', 'HuggingFaceEmbeddings', ([], {}), '()\n', (2112, 2114), False, 'from langchain.embeddings.huggingface import HuggingFaceEmbeddings\n'), ((1088, 1158), 'transformers.GenerationConfig', 'GenerationConfig', ([], {'temperature': '(0.6)', 'top_p': '(0.95)', 'repetition_penalty': '(1.15)'}), '(temperature=0.6, top_p=0.95, repetition_penalty=1.15)\n', (1104, 1158), False, 'from transformers import LlamaTokenizer, LlamaForCausalLM, GenerationConfig\n'), ((2129, 2158), 'llama_index.SimpleDirectoryReader', 'SimpleDirectoryReader', (['"""data"""'], {}), "('data')\n", (2150, 2158), False, 'from llama_index import SimpleDirectoryReader, LLMPredictor, PromptHelper, GPTSimpleVectorIndex\n'), ((1224, 1239), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (1237, 1239), False, 'import torch\n')] |
import streamlit as st
import pandas as pd
import time
import gcsfs
import asyncio
import os
import chromadb
from chromadb.utils import embedding_functions
import langchain
from langchain.document_loaders import TextLoader
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.text_splitter import CharacterTextSplitter
from langchain.chat_models import ChatOpenAI
from langchain.prompts import ChatPromptTemplate
from langchain.chains import LLMChain
from langchain.vectorstores import Chroma
import ast # Import the ast module for literal evaluation
# SINGLE RESPONSE GENERATION
async def get_email_response_personalized(sender_id,
replier_id,
sender_email,
email_retrieval_dataset,
num_emails,
vector_db_client,
llm_endpoint,
template_string):
# First getting retrieved emails to understand conversation --------
sender_replier_id='-'.join([sender_id, replier_id])
previous_emails=(email_retrieval_dataset[email_retrieval_dataset.sender_replier_thread==sender_replier_id]['Sender_Receiver_Emails_list']).to_list()[0][-num_emails:]
# Second, getting ranked responses as per context ------------------
# Building the Langchain vectorstore using chroma collections
user_vector_store = Chroma(
client=vector_db_client,
collection_name='user'+str(replier_id),
embedding_function=OpenAIEmbeddings())
# Getting ranked responses using MMR
found_rel_emails = await user_vector_store.amax_marginal_relevance_search(sender_email, k=num_emails, fetch_k=num_emails)
list_rel_emails=[]
for i, doc in enumerate(found_rel_emails):
list_rel_emails.append(doc.page_content)
# Setting up LangChain
prompt_template = ChatPromptTemplate.from_template(template=template_string)
llm_chain=LLMChain(llm=llm_endpoint, prompt=prompt_template)
return llm_chain.run(sender_email=sender_email, prev_emails=previous_emails, relevant_emails=list_rel_emails)
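# A hedged sketch (not present in the original code) of how a per-user Chroma collection
# like the one queried above could be populated. It assumes each replier's previously
# sent emails are available as a list of strings; the 'user<replier_id>' collection
# naming follows the convention used in get_email_response_personalized.
def build_user_collection(vector_db_client, replier_id, sent_emails):
    user_vector_store = Chroma(
        client=vector_db_client,
        collection_name='user' + str(replier_id),
        embedding_function=OpenAIEmbeddings(),
    )
    # Embeds and stores the replier's past emails so MMR retrieval can rank them later
    user_vector_store.add_texts(sent_emails)
    return user_vector_store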
async def main(*args, **kwargs):
return await get_email_response_personalized(*args, **kwargs)
# https://medium.com/@faizififita1/how-to-deploy-your-streamlit-web-app-to-google-cloud-run-ba776487c5fe
# gcloud builds submit --tag gcr.io/msca310019-capstone-49b3/streamlit-app
os.environ["GOOGLE_APPLICATION_CREDENTIALS"] = "credentials/msca310019-capstone-49b3-af1bf51fb3e1.json"
# Create a GCS filesystem instance
fs = gcsfs.GCSFileSystem(project='msca310019-capstone-49b3')
# Define the path to your CSV file in the GCS bucket
file_path = 'user-scripts-msca310019-capstone-49b3/data/20231026_Emails_Deduped.csv'
# Read the CSV file from Google Cloud Storage
with fs.open(file_path, 'rb') as file:
# Use Pandas to read the CSV content into a DataFrame
df_messages_deduped = pd.read_csv(file)
df_messages_deduped['Sender_Receiver_Emails_list'] = df_messages_deduped['Sender_Receiver_Emails'].apply(lambda x: ast.literal_eval(x) if pd.notna(x) else [])
# Find senders with more than 10 rows
repliers_with_more_than_10_rows = df_messages_deduped['reply_sender'].value_counts()[df_messages_deduped['reply_sender'].value_counts() > 10].index
# Filter the DataFrame to include only the rows with those senders
df_messages_deduped = df_messages_deduped[df_messages_deduped['reply_sender'].isin(repliers_with_more_than_10_rows)]
# users_more_than_50 = pd.read_csv('users_more_than_50.csv')
# # Define the path to your CSV file in the GCS bucket
# data_path = 'user-scripts-msca310019-capstone-49b3/data/data_message_reply_pairs_cleaned.csv'
# # Read the CSV file from Google Cloud Storage
# with fs.open(data_path, 'rb') as file:
# # Use Pandas to read the CSV content into a DataFrame
# data = pd.read_csv(file)
# data = pd.read_csv('/Users/scottsmacbook/Hedwig/Hedwig/00_Data/data_data_message_reply_pairs_cleaned.csv')
# Define a function to generate a random email and its sender ID
def generate_random_email():
st.session_state.random_email = df_messages_deduped.sample(n=1)
# # Define a function to type out a string letter by letter
def type_string(text):
t = st.empty()
for i in range(len(text) + 1):
t.markdown("## %s" % text[0:i])
time.sleep(0.005)
# # Set the title image path
title_image_path = 'Hedwig Logo.jpeg' # Replace with the actual path
# # Display the title and image side by side
# st.title("Hedwig.AI")
col1, mid, col2 = st.columns([1,1,20])
with col1:
st.image('Hedwig Logo.jpeg', width=60)
with col2:
st.title("Hedwig.AI")
# st.image(title_image_path, use_column_width=False, width=100)
# # Use HTML and CSS to display title and image side by side
# st.markdown(
# f"""
# <div style="display: flex; align-items: center;">
# <h1 style="flex: 1;">Hedwig.AI</h1>
# <img src="{title_image_path}" width="100">
# </div>
# """,
# unsafe_allow_html=True,
# )
# Use Streamlit's 'columns' layout to display buttons side by side
col1, col2 = st.columns(2)
with col1:
st.subheader("Incoming Email")
with col2:
st.subheader("Generated Email")
# Button to get a random email
if col1.button("Get Random Email Reply Pair"):
generate_random_email()
if 'random_email' not in st.session_state:
generate_random_email()
random_email = st.session_state.random_email
with col1:
st.write(f"Sender ID: {str(list(random_email['sender'])[0])}")
# Create a larger text area for user input (e.g., 10 rows)
user_input = st.text_area("Enter Email:", height=500)
# st.write(f"Email: {user_input}")
# Input field for email response
# replier_id = st.text_input("Enter your user :")
# Button to generate the response
if col2.button("Generate Response") and st.session_state.random_email is not None:
with col2:
st.write(f"Replier ID: {str(list(random_email['reply_sender'])[0])}")
# st.write("Response:")
random_email = st.session_state.random_email
client = chromadb.PersistentClient(path="vectorstores")
sender_id = str(list(random_email['sender'])[0])
replier_id = str(list(random_email['reply_sender'])[0])
sender_email = user_input
num_emails = 10 # FOR RETRIEVAL + RANKING
email_retrieval_dataset = df_messages_deduped # FOR RETRIEVAL DATABASE
vector_db_client = client
openai_api_key = os.getenv("OPENAI_API_KEY")  # avoid hard-coding secrets; read the key from the environment
api_key=openai_api_key
llm_model='gpt-3.5-turbo-0301' # CAN CHANGE
llm_endpoint=ChatOpenAI(temperature=0.1, model=llm_model, openai_api_key=openai_api_key) # CAN CHANGE
template_string="""You are an employee of a company and the receiver of this email: {sender_email}.
Reply to the email as the receiver,
deriving the context from these relevant emails: {relevant_emails}
and paying attention to and copying the writing style and tone of the receiver's previous emails: {prev_emails}.
Use a salutation and signature similar to the user's previous emails.
"""
os.environ['OPENAI_API_KEY'] = openai_api_key
personalized_response = asyncio.run(get_email_response_personalized(sender_id,replier_id,sender_email,df_messages_deduped,
num_emails,client,llm_endpoint,template_string))
st.text_area("Generated Response: ", value=personalized_response, key='response_area', height=500)
# type_text_in_textarea(personalized_response)
| [
"langchain.chains.LLMChain",
"langchain.embeddings.openai.OpenAIEmbeddings",
"langchain.prompts.ChatPromptTemplate.from_template",
"langchain.chat_models.ChatOpenAI"
] | [((2623, 2678), 'gcsfs.GCSFileSystem', 'gcsfs.GCSFileSystem', ([], {'project': '"""msca310019-capstone-49b3"""'}), "(project='msca310019-capstone-49b3')\n", (2642, 2678), False, 'import gcsfs\n'), ((4609, 4631), 'streamlit.columns', 'st.columns', (['[1, 1, 20]'], {}), '([1, 1, 20])\n', (4619, 4631), True, 'import streamlit as st\n'), ((5175, 5188), 'streamlit.columns', 'st.columns', (['(2)'], {}), '(2)\n', (5185, 5188), True, 'import streamlit as st\n'), ((1987, 2045), 'langchain.prompts.ChatPromptTemplate.from_template', 'ChatPromptTemplate.from_template', ([], {'template': 'template_string'}), '(template=template_string)\n', (2019, 2045), False, 'from langchain.prompts import ChatPromptTemplate\n'), ((2064, 2114), 'langchain.chains.LLMChain', 'LLMChain', ([], {'llm': 'llm_endpoint', 'prompt': 'prompt_template'}), '(llm=llm_endpoint, prompt=prompt_template)\n', (2072, 2114), False, 'from langchain.chains import LLMChain\n'), ((2988, 3005), 'pandas.read_csv', 'pd.read_csv', (['file'], {}), '(file)\n', (2999, 3005), True, 'import pandas as pd\n'), ((4303, 4313), 'streamlit.empty', 'st.empty', ([], {}), '()\n', (4311, 4313), True, 'import streamlit as st\n'), ((4646, 4684), 'streamlit.image', 'st.image', (['"""Hedwig Logo.jpeg"""'], {'width': '(60)'}), "('Hedwig Logo.jpeg', width=60)\n", (4654, 4684), True, 'import streamlit as st\n'), ((4700, 4721), 'streamlit.title', 'st.title', (['"""Hedwig.AI"""'], {}), "('Hedwig.AI')\n", (4708, 4721), True, 'import streamlit as st\n'), ((5205, 5235), 'streamlit.subheader', 'st.subheader', (['"""Incoming Email"""'], {}), "('Incoming Email')\n", (5217, 5235), True, 'import streamlit as st\n'), ((5252, 5283), 'streamlit.subheader', 'st.subheader', (['"""Generated Email"""'], {}), "('Generated Email')\n", (5264, 5283), True, 'import streamlit as st\n'), ((5672, 5712), 'streamlit.text_area', 'st.text_area', (['"""Enter Email:"""'], {'height': '(500)'}), "('Enter Email:', height=500)\n", (5684, 5712), True, 'import streamlit as st\n'), ((4402, 4419), 'time.sleep', 'time.sleep', (['(0.005)'], {}), '(0.005)\n', (4412, 4419), False, 'import time\n'), ((6163, 6209), 'chromadb.PersistentClient', 'chromadb.PersistentClient', ([], {'path': '"""vectorstores"""'}), "(path='vectorstores')\n", (6188, 6209), False, 'import chromadb\n'), ((6731, 6806), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'temperature': '(0.1)', 'model': 'llm_model', 'openai_api_key': 'openai_api_key'}), '(temperature=0.1, model=llm_model, openai_api_key=openai_api_key)\n', (6741, 6806), False, 'from langchain.chat_models import ChatOpenAI\n'), ((7673, 7776), 'streamlit.text_area', 'st.text_area', (['"""Generated Response: """'], {'value': 'personalized_response', 'key': '"""response_area"""', 'height': '(500)'}), "('Generated Response: ', value=personalized_response, key=\n 'response_area', height=500)\n", (7685, 7776), True, 'import streamlit as st\n'), ((1624, 1642), 'langchain.embeddings.openai.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {}), '()\n', (1640, 1642), False, 'from langchain.embeddings.openai import OpenAIEmbeddings\n'), ((3145, 3156), 'pandas.notna', 'pd.notna', (['x'], {}), '(x)\n', (3153, 3156), True, 'import pandas as pd\n'), ((3122, 3141), 'ast.literal_eval', 'ast.literal_eval', (['x'], {}), '(x)\n', (3138, 3141), False, 'import ast\n')] |
import langchain.graphs.neo4j_graph as neo4j_graph
import os
import sys
import ast
sys.path.append('backendPython')
from llms import *
from dotenv import load_dotenv, find_dotenv
load_dotenv(find_dotenv()) # read local .env file
from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate, FewShotPromptTemplate
from neo4j_dir.entities import get_nodes_chain
graph = neo4j_graph.Neo4jGraph(
url = os.environ['NEO4J_URI'],
username=os.environ['NEO4J_USERNAME'],
password=os.environ['NEO4J_PASSWORD']
)
#__________________________________________________________________________________________________
# giving few-shot prompts
examples = [
{'Query' : 'name 10 companies which came for cgpa below 7' ,
'syntax' : '''MATCH (c:Company)-[Company_to_CGPA]->(cgpa:CGPA)
WHERE cgpa.name < 7
RETURN c.name LIMIT 10'''},
{"Query": "The ctc recieved by students for cgpa below 6",
"syntax": '''MATCH (cgpa:CGPA)-[CGPA_to_CTC]->(ctc:CTC)
WHERE cgpa.name < 6
RETURN ctc '''},
{"Query": "name 10 companies which came for cgpa below 7 and ctc above 20",
"syntax" :'''MATCH (company:Company)-[Company_to_CGPA]->(cgpa:CGPA)
MATCH (company)-[Company_to_CTC]->(ctc:CTC)
WHERE cgpa.name < 7 AND ctc.name > 20
RETURN company LIMIT 10;'''
} ,
{ "Query": "minimum cgpa required for ctc above 40" ,
"syntax" : ''' MATCH (cgpa:CGPA)-[CGPA_to_CTC]->(ctc:CTC)
WHERE ctc.name > 20
RETURN MIN(cgpa.name) AS minimum_cgpa;
''' ,
} ,
{ "Query" :"maximum and minimum cgpa required for ctc above 40" ,
"syntax" : '''
MATCH (cgpa:CGPA)-[CGPA_to_CTC]->(ctc:CTC)-[CTC_to_Company]->(company:Company)
WHERE ctc.name > 40
WITH MIN(cgpa.name) AS minCGPA, MAX(cgpa.name) AS maxCGPA
RETURN minCGPA, maxCGPA;
'''
} ,
]
example_formatter_template = """
Query :
{Query}
Cypher Syntax :
{syntax}
"""
example_prompt = PromptTemplate(
input_variables=['Query','syntax'],
template=example_formatter_template,
)
prefix = '''
You are supposed to convert the following natural query into a cypher query for neo4j database
You can refer to below examples for getting idea of how to write cypher query for neo4j database.
'''
suffix = '''
create a cypher query for following natural query for neo4j database
The query has the following list of nodes :
{List_of_nodes}
You can create relations between the nodes in the following manner:
Node1 : Company
Node2 : CGPA
Relation1 : Company_to_CGPA
Relation2 : CGPA_to_Company
natural_query :
{natural_query}
Do not put back-ticks(`) in the output.
Use only nodes provided , don't create your new nodes and relations.
'''
few_shot_prompt = FewShotPromptTemplate(
examples=examples,
# prompt template used to format each individual example
example_prompt=example_prompt,
# prompt template string to put before the examples, assigning roles and rules.
prefix=prefix ,
# prompt template string to put after the examples.
suffix=suffix ,
# input variable to use in the suffix template
input_variables=["List_of_nodes" , "natural_query"],
example_separator="\n",
)
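# Optional sanity check (not in the original): render the assembled few-shot prompt for a
# sample query to confirm the template variables resolve as expected. The sample values
# mirror the first worked example above.
# print(few_shot_prompt.format(List_of_nodes=['Company', 'CGPA'],
#                              natural_query='name 10 companies which came for cgpa below 7'))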
cypher_chain = LLMChain(llm=llm, prompt=few_shot_prompt,verbose=False,)
#__________________________________________________________________________________________________
# cypher = cypher_chain.run({"List_of_nodes" :['Company' , 'CGPA'] , 'natural_query':'name 10 companies which came for cgpa below 7'})
# result = graph.query(cypher)
# print(result)
#__________________________________________________________________________________________________
result_template = '''
You will be provided with the response generated for the given user query.
Response :
{response}
You need to format the response in HTML in a conversational way,
arrange the response in bulleted points and under major headings if possible.
Take care that you do not pollute the data provided in response, by adding your own data.
Check that there are no back-ticks(`) in the output.
Check that html syntax is correct.
'''
result_prompt = PromptTemplate(input_variables=['response'], template=result_template)
result_chain = LLMChain(llm=llm, prompt=result_prompt)
# print(result_chain.run({'response': result}))
# #__________________________________________________________________________________________________
def get_response(query):
print('\n\n\n\n', query)
li = get_nodes_chain.run(query)
print(li)
if type(li)==str:
li = ast.literal_eval(li)
cypher = cypher_chain.run({"List_of_nodes" : li, 'natural_query':query})
print('\n\n\n', cypher,'\n\n\n')
result = graph.query(cypher)
print('\n\n\n', result)
response = result_chain.run({'response': result})
# print('\n\n\n', response)
return response
# x = get_response('what ctc is offered for cgpa below 7, sort the ctc in descending order')
# x = get_response("list companies with ctc above 30")
# x= get_response("list companies with ctc above 30 and cgpa below 8")
# x = get_response('name 10 companies which offered ctc above 20')
# x = get_response('jobProfiles available for cgpa below 7')
# print(x)
| [
"langchain.chains.LLMChain",
"langchain.graphs.neo4j_graph.Neo4jGraph",
"langchain.prompts.FewShotPromptTemplate",
"langchain.prompts.PromptTemplate"
] | [((83, 115), 'sys.path.append', 'sys.path.append', (['"""backendPython"""'], {}), "('backendPython')\n", (98, 115), False, 'import sys\n'), ((392, 526), 'langchain.graphs.neo4j_graph.Neo4jGraph', 'neo4j_graph.Neo4jGraph', ([], {'url': "os.environ['NEO4J_URI']", 'username': "os.environ['NEO4J_USERNAME']", 'password': "os.environ['NEO4J_PASSWORD']"}), "(url=os.environ['NEO4J_URI'], username=os.environ[\n 'NEO4J_USERNAME'], password=os.environ['NEO4J_PASSWORD'])\n", (414, 526), True, 'import langchain.graphs.neo4j_graph as neo4j_graph\n'), ((2133, 2226), 'langchain.prompts.PromptTemplate', 'PromptTemplate', ([], {'input_variables': "['Query', 'syntax']", 'template': 'example_formatter_template'}), "(input_variables=['Query', 'syntax'], template=\n example_formatter_template)\n", (2147, 2226), False, 'from langchain.prompts import PromptTemplate, FewShotPromptTemplate\n'), ((2940, 3125), 'langchain.prompts.FewShotPromptTemplate', 'FewShotPromptTemplate', ([], {'examples': 'examples', 'example_prompt': 'example_prompt', 'prefix': 'prefix', 'suffix': 'suffix', 'input_variables': "['List_of_nodes', 'natural_query']", 'example_separator': '"""\n"""'}), "(examples=examples, example_prompt=example_prompt,\n prefix=prefix, suffix=suffix, input_variables=['List_of_nodes',\n 'natural_query'], example_separator='\\n')\n", (2961, 3125), False, 'from langchain.prompts import PromptTemplate, FewShotPromptTemplate\n'), ((3431, 3487), 'langchain.chains.LLMChain', 'LLMChain', ([], {'llm': 'llm', 'prompt': 'few_shot_prompt', 'verbose': '(False)'}), '(llm=llm, prompt=few_shot_prompt, verbose=False)\n', (3439, 3487), False, 'from langchain.chains import LLMChain\n'), ((4337, 4407), 'langchain.prompts.PromptTemplate', 'PromptTemplate', ([], {'input_variables': "['response']", 'template': 'result_template'}), "(input_variables=['response'], template=result_template)\n", (4351, 4407), False, 'from langchain.prompts import PromptTemplate, FewShotPromptTemplate\n'), ((4424, 4463), 'langchain.chains.LLMChain', 'LLMChain', ([], {'llm': 'llm', 'prompt': 'result_prompt'}), '(llm=llm, prompt=result_prompt)\n', (4432, 4463), False, 'from langchain.chains import LLMChain\n'), ((191, 204), 'dotenv.find_dotenv', 'find_dotenv', ([], {}), '()\n', (202, 204), False, 'from dotenv import load_dotenv, find_dotenv\n'), ((4679, 4705), 'neo4j_dir.entities.get_nodes_chain.run', 'get_nodes_chain.run', (['query'], {}), '(query)\n', (4698, 4705), False, 'from neo4j_dir.entities import get_nodes_chain\n'), ((4755, 4775), 'ast.literal_eval', 'ast.literal_eval', (['li'], {}), '(li)\n', (4771, 4775), False, 'import ast\n')] |
import langchain_helper as lch
import streamlit as st
st.title("Pet Name Generator")
animal_type = st.sidebar.selectbox("What is your pet?",("Cat","Dog","Bird","Rabbit"))
pet_color = st.sidebar.text_area(f"What is the color of your {animal_type.lower()}?", max_chars=10)
if pet_color:
response = lch.generate_pet_name(animal_type,pet_color)
# st.text(response)
st.write("Here are some cool names for your pet:")
st.write(response['pet_name']) | [
"langchain_helper.generate_pet_name"
] | [((55, 85), 'streamlit.title', 'st.title', (['"""Pet Name Generator"""'], {}), "('Pet Name Generator')\n", (63, 85), True, 'import streamlit as st\n'), ((101, 176), 'streamlit.sidebar.selectbox', 'st.sidebar.selectbox', (['"""What is your pet?"""', "('Cat', 'Dog', 'Bird', 'Rabbit')"], {}), "('What is your pet?', ('Cat', 'Dog', 'Bird', 'Rabbit'))\n", (121, 176), True, 'import streamlit as st\n'), ((214, 282), 'streamlit.sidebar.text_area', 'st.sidebar.text_area', (['"""What is the color of your cat?"""'], {'max_chars': '(10)'}), "('What is the color of your cat?', max_chars=10)\n", (234, 282), True, 'import streamlit as st\n'), ((327, 395), 'streamlit.sidebar.text_area', 'st.sidebar.text_area', (['"""What is the color of your dog?"""'], {'max_chars': '(10)'}), "('What is the color of your dog?', max_chars=10)\n", (347, 395), True, 'import streamlit as st\n'), ((437, 506), 'streamlit.sidebar.text_area', 'st.sidebar.text_area', (['"""What is the color of your bird?"""'], {'max_chars': '(10)'}), "('What is the color of your bird?', max_chars=10)\n", (457, 506), True, 'import streamlit as st\n'), ((550, 621), 'streamlit.sidebar.text_area', 'st.sidebar.text_area', (['"""What is the color of your rabbit?"""'], {'max_chars': '(10)'}), "('What is the color of your rabbit?', max_chars=10)\n", (570, 621), True, 'import streamlit as st\n'), ((651, 696), 'langchain_helper.generate_pet_name', 'lch.generate_pet_name', (['animal_type', 'pet_color'], {}), '(animal_type, pet_color)\n', (672, 696), True, 'import langchain_helper as lch\n'), ((724, 774), 'streamlit.write', 'st.write', (['"""Here are some cool names for your pet:"""'], {}), "('Here are some cool names for your pet:')\n", (732, 774), True, 'import streamlit as st\n'), ((779, 809), 'streamlit.write', 'st.write', (["response['pet_name']"], {}), "(response['pet_name'])\n", (787, 809), True, 'import streamlit as st\n')] |
# Import langchain modules
from langchain.memory import Memory, ConversationBufferMemory
from langchain.agents import BaseMultiActionAgent, AgentExecutor
# Import other modules and classes
from research_agent import ResearchAgent
class ConversationMemory(Memory):
def __init__(self):
# Initialize the parent class with an empty dictionary
super().__init__(memory={})
# Initialize the conversation buffer memory attribute
self.conversation_buffer_memory = ConversationBufferMemory()
def set(self, key: str, value: str) -> None:
# Store the value in the memory dictionary
self.memory[key] = value
# Add the value to the conversation buffer memory
self.conversation_buffer_memory.add(value)
def get(self, key: str) -> str:
# Retrieve the value from the memory dictionary
return self.memory.get(key)
# Create a ConversationMemory instance
conversation_memory = ConversationMemory()
# Create an AgentExecutor instance with conversation_memory as a parameter
agent_executor = AgentExecutor(
agent=ResearchAgent(prompt_template, language_model, stop_sequence, output_parser),
memory=conversation_memory,
max_turns=10
)
# Run the AgentExecutor
agent_executor.run()
| [
"langchain.memory.ConversationBufferMemory"
] | [((505, 531), 'langchain.memory.ConversationBufferMemory', 'ConversationBufferMemory', ([], {}), '()\n', (529, 531), False, 'from langchain.memory import Memory, ConversationBufferMemory\n'), ((1122, 1198), 'research_agent.ResearchAgent', 'ResearchAgent', (['prompt_template', 'language_model', 'stop_sequence', 'output_parser'], {}), '(prompt_template, language_model, stop_sequence, output_parser)\n', (1135, 1198), False, 'from research_agent import ResearchAgent\n')] |
from langchain.llms import HuggingFacePipeline
import langchain
from ingest import create_vector_db
from langchain.cache import InMemoryCache
from langchain.schema import prompt
from langchain.chains import RetrievalQA
from langchain.callbacks import StdOutCallbackHandler
from langchain import PromptTemplate
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
DB_FAISS_PATH = 'vectorstoredb/db_faiss'
langchain.llm_cache = InMemoryCache()
PROMPT_TEMPLATE = '''
Use the following pieces of information to answer the user's question.
If you don't know the answer, just say that you don't know, don't try to make up an answer.
Context: {context}
Question: {question}
Do provide only helpful answers
Helpful answer:
'''
handler = StdOutCallbackHandler()
def set_custom_prompt():
input_variables = ['context', 'question']
prompt = PromptTemplate(template=PROMPT_TEMPLATE, input_variables=input_variables)
return prompt
def load_retriever():
return create_vector_db
def load_llm():
model_name = 'TheBloke/Mistral-7B-Instruct-v0.1-GPTQ'
model = AutoModelForCausalLM.from_pretrained(model_name,
device_map="auto",
trust_remote_code=False,
revision="gptq-8bit-32g-actorder_True")
tokenizer = AutoTokenizer.from_pretrained(model_name,use_fast = True)
pipe = pipeline(
"text-generation",
model=model,
tokenizer=tokenizer,
max_new_tokens=512,
do_sample=True,
temperature=0.1,
top_p=0.95,
top_k=40,
repetition_penalty=1.1
)
llm = HuggingFacePipeline(pipeline=pipe)
return llm
def retrieval_qa_chain(llm, prompt, retriever):
qa_chain = RetrievalQA.from_chain_type(
llm=llm,
chain_type="stuff",
retriever = retriever,
verbose=True,
callbacks=[handler],
chain_type_kwargs={"prompt": prompt},
return_source_documents=True
)
return qa_chain
def final_result(query):
llm = load_llm()
retriever = load_retriever()
prompt = set_custom_prompt()
qa_result = retrieval_qa_chain(llm, prompt, retriever=retriever)
response = qa_result({"query": query})
return response
if __name__ == "__main__":
query = "What is the description of the accord form?"
final_result(query=query)
| [
"langchain.chains.RetrievalQA.from_chain_type",
"langchain.llms.HuggingFacePipeline",
"langchain.callbacks.StdOutCallbackHandler",
"langchain.cache.InMemoryCache",
"langchain.PromptTemplate"
] | [((448, 463), 'langchain.cache.InMemoryCache', 'InMemoryCache', ([], {}), '()\n', (461, 463), False, 'from langchain.cache import InMemoryCache\n'), ((754, 777), 'langchain.callbacks.StdOutCallbackHandler', 'StdOutCallbackHandler', ([], {}), '()\n', (775, 777), False, 'from langchain.callbacks import StdOutCallbackHandler\n'), ((862, 935), 'langchain.PromptTemplate', 'PromptTemplate', ([], {'template': 'PROMPT_TEMPLATE', 'input_variables': 'input_variables'}), '(template=PROMPT_TEMPLATE, input_variables=input_variables)\n', (876, 935), False, 'from langchain import PromptTemplate\n'), ((1092, 1228), 'transformers.AutoModelForCausalLM.from_pretrained', 'AutoModelForCausalLM.from_pretrained', (['model_name'], {'device_map': '"""auto"""', 'trust_remote_code': '(False)', 'revision': '"""gptq-8bit-32g-actorder_True"""'}), "(model_name, device_map='auto',\n trust_remote_code=False, revision='gptq-8bit-32g-actorder_True')\n", (1128, 1228), False, 'from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline\n'), ((1377, 1433), 'transformers.AutoTokenizer.from_pretrained', 'AutoTokenizer.from_pretrained', (['model_name'], {'use_fast': '(True)'}), '(model_name, use_fast=True)\n', (1406, 1433), False, 'from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline\n'), ((1446, 1615), 'transformers.pipeline', 'pipeline', (['"""text-generation"""'], {'model': 'model', 'tokenizer': 'tokenizer', 'max_new_tokens': '(512)', 'do_sample': '(True)', 'temperature': '(0.1)', 'top_p': '(0.95)', 'top_k': '(40)', 'repetition_penalty': '(1.1)'}), "('text-generation', model=model, tokenizer=tokenizer,\n max_new_tokens=512, do_sample=True, temperature=0.1, top_p=0.95, top_k=\n 40, repetition_penalty=1.1)\n", (1454, 1615), False, 'from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline\n'), ((1696, 1730), 'langchain.llms.HuggingFacePipeline', 'HuggingFacePipeline', ([], {'pipeline': 'pipe'}), '(pipeline=pipe)\n', (1715, 1730), False, 'from langchain.llms import HuggingFacePipeline\n'), ((1810, 2000), 'langchain.chains.RetrievalQA.from_chain_type', 'RetrievalQA.from_chain_type', ([], {'llm': 'llm', 'chain_type': '"""stuff"""', 'retriever': 'retriever', 'verbose': '(True)', 'callbacks': '[handler]', 'chain_type_kwargs': "{'prompt': prompt}", 'return_source_documents': '(True)'}), "(llm=llm, chain_type='stuff', retriever=\n retriever, verbose=True, callbacks=[handler], chain_type_kwargs={\n 'prompt': prompt}, return_source_documents=True)\n", (1837, 2000), False, 'from langchain.chains import RetrievalQA\n')] |
"""
Implement the actions as tools so that we can validate their inputs.
"""
import langchain
from langchain.schema import AgentAction, AgentFinish
from langchain.schema.output import LLMResult
from langchain.agents import AgentType, initialize_agent
from langchain.tools import Tool, StructuredTool
from langchain.tools.base import ToolException
from langchain.chat_models import ChatOpenAI
from langchain.callbacks.base import BaseCallbackHandler
from typing import Any, Dict
# from pydantic import BaseModel, Field, root_validator
# from pydantic import Field, root_validator
from pydantic.v1 import BaseModel, Field, root_validator
# from pydantic.v1 import BaseModel
from typing import Optional
from browser_env.actions import (
Action,
ActionParsingError,
create_id_based_action,
create_none_action,
create_playwright_action,
)
from browser_env import (
Action,
ActionTypes,
ScriptBrowserEnv,
StateInfo,
Trajectory,
create_stop_action,
)
from browser_env.helper_functions import (
RenderHelper,
get_action_description,
)
from browser_env.env_config import URL_MAPPINGS
import my_globals
class LLMOutputHandler(BaseCallbackHandler):
def on_llm_end(self, response: LLMResult, **kwargs: Any) -> Any:
"""Run when LLM ends running."""
# do this to pass LLM outputs to render_helper. cos the intermediate_steps alr
# uses output_parser which throws away raw LLM output
my_globals.llm_response = response.generations[-1][0].text
def init_tools_args(args, lm_config, tokenizer):
langchain.verbose = True
SHOULD_RETURN_DIRECT = args.tools_return_true
PREFIX = """You are an autonomous intelligent agent tasked with completing an objective via navigating a web browser.
This objective will be accomplished through the use of specific actions (tools) you can issue.
Here's the information you'll have:
The user's objective: This is the task you're trying to complete.
The current web page's accessibility tree: This is a simplified representation of the webpage, providing key information.
The accessibility tree is of the form `[element_id] Text describing the element` (i.e. the element id is to the left of the description)
The current web page's URL: This is the page you're currently navigating.
The open tabs: These are the tabs you have open.
The previous action: This is the action you just performed. It may be helpful to track your progress.
Homepage:
If you want to visit other websites, check out the homepage at http://homepage.com. It has a list of websites you can visit.
Respond to the human as helpfully and accurately as possible.
You have access to the following tools:"""
SUFFIX = """
=====Start of example 1 =====
YOUR CURRENT OBJECTIVE: What is the price of HP Inkjet Fax Machine
PREVIOUS ACTION: None
Observation:
URL: http://onestopmarket.com/office-products/office-electronics.html
Accessibility tree:
[1744] link 'HP CB782A#ABA 640 Inkjet Fax Machine (Renewed)'
[1749] StaticText '$279.49'
[1757] button 'Add to Cart'
[1760] button 'Add to Wish List'
[1761] button 'Add to Compare'
Thought: Let's think step-by-step. This page lists the information of HP Inkjet Fax Machine, which is the product identified in the objective. Its price is $279.49. I think I have achieved the objective. I will issue the stop action with the answer.
Action:
```
{{
"action": "stop",
"action_input": {{"final_answer" : "$279.49"}}
}}
```
=====End of example 1 =====
=====Start of example 2 =====
YOUR CURRENT OBJECTIVE: Show me the restaurants near CMU
PREVIOUS ACTION: None
Observation:
URL: http://openstreetmap.org
Accessibility tree:
[164] textbox 'Search' focused: True required: False
[171] button 'Go'
[174] link 'Find directions between two points'
[212] heading 'Search Results'
[216] button 'Close'
Thought: Let's think step-by-step. This page has a search box whose ID is [164]. According to the nominatim rule of openstreetmap, I can search for the restaurants near a location by \"restaurants near\". I can submit my typing by pressing the Enter afterwards.
Action:
```
{{
"action": "type_into_field",
"action_input": {{"element_id": 164, content: "restaurants near CMU", press_enter_after: 1}}
}}
```
=====End of example 2 =====
To be successful, it is very important to follow the following rules:
1. You should only issue an action that is valid given the current observation
2. In your thoughts, you should follow the examples to reason step by step and then issue the next action.
3. Think whether your action makes sense. For example, it is pointless to click on static texts as it does nothing.
4. Issue stop action when you think you have achieved the objective.
Begin! Reminder to ALWAYS respond with a valid json blob of a single action."""
# NOTE: the agent scratchpad is omitted to stay within the context limit; the variable is
# kept so prompt validation passes, but it is formatted to zero characters below.
# TODO: consider memory variables or a summary of prior steps instead.
HUMAN_MESSAGE_TEMPLATE = '{input}\n\n{agent_scratchpad:0.0}'
FORMAT_INSTRUCTIONS = """Use a json blob to specify a tool by providing an action key (tool name) and an action_input key (tool input).
Valid "action" values: {tool_names}
Provide only ONE action per $JSON_BLOB, as shown:
```
{{{{
"action": $TOOL_NAME,
"action_input": $INPUT
}}}}
```
Follow this format:
Thought: consider previous and subsequent steps, reason step by step what the best next action should be
Action:
```
$JSON_BLOB
```
Observation: action result / accessibility tree / URL
... (repeat Thought/Action/Observation N times)
Thought: The objective can be achieved now (and explain step by step why this is so). I know what to respond.
Action:
```
{{{{
"action": "stop",
"action_input": {{{{"final_answer" : "Final response to human"}}}}
}}}}
```"""
a_kwargs = {
'prefix': PREFIX,
'format_instructions': FORMAT_INSTRUCTIONS,
'suffix': SUFFIX,
'human_message_template': HUMAN_MESSAGE_TEMPLATE,
}
ex1 = """
Example:
### Example start ###
Observation:
[1744] link 'HP CB782A#ABA 640 Inkjet Fax Machine (Renewed)'
[1749] StaticText '$279.49'
[1757] button 'Add to Cart'
In this example, there is a link with element id 1744 and a button with element id 1757. If you want to click on element
id 1757, issue this function with 1757 as the parameter. Note that element 1749 is static text so it is not clickable
### Example end ###
"""
ex2 = """
Example:
### Example start ###
Observation:
[1744] link 'HP CB782A#ABA 640 Inkjet Fax Machine (Renewed)'
[1749] StaticText '$279.49'
[1757] button 'Add to Cart'
OBJECTIVE: What is the price of HP Inkjet Fax Machine
In this example we see that this page lists the information of HP Inkjet Fax Machine, which is the product identified in the objective. Its price is $279.49. Since you have achieved the objective, you will issue the stop action with the parameter $279.49
### Example end ###
"""
def map_url_to_real(url: str) -> str:
"""Map the urls to their real world counterparts"""
for i, j in URL_MAPPINGS.items():
if i in url:
url = url.replace(i, j)
return url
def map_url_to_local(url: str) -> str:
"""Map the urls to their local counterparts"""
for i, j in URL_MAPPINGS.items():
if j in url:
url = url.replace(j, i)
return url
def create_action_from_str(parsed_response):
# TODO: allow for playwright actions
try:
action = create_id_based_action(parsed_response)
except ActionParsingError as e:
action = create_none_action()
action["raw_prediction"] = my_globals.llm_response
return action
def create_append_action(issued_action_str):
action = create_action_from_str(issued_action_str)
print(f'action: {action}')
my_globals.trajectory.append(action)
action_str = get_action_description(
action,
my_globals.state_info["info"]["observation_metadata"],
action_set_tag=args.action_set_tag,
prompt_constructor=None,
)
print(f'action str: {action_str}')
my_globals.render_helper.render(
action, my_globals.state_info, my_globals.meta_data, args.render_screenshot
)
my_globals.meta_data["action_history"].append(action_str)
return action
def execute_action(issued_action_str):
# TODO: early stop, action error handling
action = create_append_action(issued_action_str)
# Note: this is meant to break loop if agent issues stop or early stopping
# Since our agent's stop already breaks out of loop, we only need to
# handle early stopping later
# if action["action_type"] == ActionTypes.STOP:
# return STOP_SEQUENCE
obs, _, terminated, _, info = my_globals.env.step(action)
my_globals.state_info = {"observation": obs, "info": info}
my_globals.trajectory.append(my_globals.state_info)
# Note: the ScriptBrowserEnv always returns False for terminated
# if terminated:
# my_globals.trajectory.append(create_stop_action(""))
# return STOP_SEQUENCE
accessibility_tree = obs["text"]
max_obs_length = lm_config.gen_config["max_obs_length"]
if max_obs_length:
accessibility_tree = tokenizer.decode(tokenizer.encode(accessibility_tree)[:max_obs_length])
page = info["page"]
url = page.url
url = map_url_to_real(url)
return f"\n URL: \n {url} \n Accessibility tree: \n {accessibility_tree} \n"
def _handle_error(error: ToolException) -> str:
return (
my_globals.tool_error_start
+ str(error.args[0])
+ " Please try other valid input(s) or tools."
)
def validate_element_id(fn_name: str, element_id: int):
if str(element_id) not in my_globals.state_info['info']['observation_metadata']['text']['obs_nodes_info']:
raise ToolException(f"Attempted to use tool {fn_name} on invalid element_id {element_id}\n"
f"The available element ids are {my_globals.state_info['info']['observation_metadata']['text']['obs_nodes_info'].keys()}"
)
# class ElementToolInputSchema(BaseModel):
# element_id: int = Field()
# @root_validator(skip_on_failure=True)
# def validate_query(cls, values: Dict[str, Any]) -> Dict:
# element_id = values["element_id"]
# if str(element_id) not in my_globals.state_info['info']['observation_metadata']['text']['obs_nodes_info']:
# raise ToolException(
# f"Element id {element_id} is not available for current observation\n"
# f"The available element ids are {my_globals.state_info['info']['observation_metadata']['text']['obs_nodes_info'].keys()}"
# )
# return values
def click(element_id: int):
validate_element_id('click', element_id)
return execute_action(f"click [{element_id}]")
click_tool = StructuredTool.from_function(
name="click",
func=click,
description="This action clicks on an element specified by the element_id in the input.",
return_direct=SHOULD_RETURN_DIRECT,
# args_schema=ElementToolInputSchema,
handle_tool_error=_handle_error,
)
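# Invocation sketch: a StructuredTool can also be exercised directly with a dict of its
# arguments, e.g. click_tool.run({"element_id": 1757}); the element id is an illustrative
# value borrowed from the examples above, not one guaranteed to exist on the current page.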
STOP_TOOL_DESC = """Issue this action when you believe the task is complete. If the objective is to find a text-based answer, provide the answer as an input to this tool. If you believe the task is impossible to complete, provide the answer as "N/A" as the input to this tool."""
def stop(final_answer: Optional[str] = ''):
action_str = f"stop [{final_answer}]"
action = create_append_action(action_str)
return action_str
stop_tool = StructuredTool.from_function(
name="stop",
func=stop,
description=STOP_TOOL_DESC,
return_direct=True,
)
# class TypeToolInputSchema(BaseModel):
# element_id: int = Field()
# content: str = Field()
# press_enter_after: int = Field()
#
# @root_validator(skip_on_failure=True)
# def validate_query(cls, values: Dict[str, Any]) -> Dict:
# element_id = values["element_id"]
# press_enter_after = values["press_enter_after"]
# if str(element_id) not in my_globals.state_info['info']['observation_metadata']['text']['obs_nodes_info']:
# raise ToolException(
# f"Element id {element_id} is not available for current observation\n"
# f"The available element ids are {my_globals.state_info['info']['observation_metadata']['text']['obs_nodes_info'].keys()}"
# )
# if press_enter_after not in [1, 0]:
# raise ToolException(
# f"press_enter_after value {press_enter_after} is not valid. It should be 1 or 0."
# )
# return values
TYPE_TOOL_DESC = """Use this to type the content into the field with element_id. press_enter_after is either 1 or 0. By default, the "Enter" key is pressed after typing unless press_enter_after is set to 0."""
def validate_press_enter(fn_name: str, press_enter_after: int):
if press_enter_after not in [1, 0]:
raise ToolException(
f"Attempted to use tool {fn_name} on invalid press_enter_after value {press_enter_after}. It should be 1 or 0."
)
def type_into_field(element_id: int, content: str, press_enter_after: Optional[int] = 1):
validate_element_id('type_into_field', element_id)
validate_press_enter('type_into_field', press_enter_after)
return execute_action(f"type [{element_id}] [{content}] [{press_enter_after}]")
type_tool = StructuredTool.from_function(
name="type_into_field",
func=type_into_field,
description=TYPE_TOOL_DESC,
return_direct=SHOULD_RETURN_DIRECT,
# args_schema=TypeToolInputSchema,
handle_tool_error=_handle_error,
)
def hover(element_id: int):
validate_element_id('hover', element_id)
return execute_action(f"hover [{element_id}]")
hover_tool = StructuredTool.from_function(
name="hover",
func=hover,
description="Hover over an element with element_id.",
return_direct=SHOULD_RETURN_DIRECT,
# args_schema=ElementToolInputSchema,
handle_tool_error=_handle_error,
)
def press(key_comb: str):
# TODO: consider validation for this
return execute_action(f"press [{key_comb}]")
press_tool = StructuredTool.from_function(
name="press",
func=press,
description="Simulates the pressing of a key combination on the keyboard (e.g., Ctrl+v).",
return_direct=SHOULD_RETURN_DIRECT,
)
# class ScrollToolInputSchema(BaseModel):
# direction: str = Field()
#
# @root_validator(skip_on_failure=True)
# def validate_query(cls, values: Dict[str, Any]) -> Dict:
# direction = values["direction"]
# if direction not in ["up", "down"]:
# raise ToolException(
# f"direction {direction} is not valid. It should be up or down"
# )
# return values
def validate_scroll(fn_name: str, direction: str):
if direction not in ["up", "down"]:
raise ToolException(
f"Attempted to use tool {fn_name} on invalid direction {direction}. It should be up or down."
)
def scroll(direction: str):
validate_scroll('scroll', direction)
return execute_action(f"scroll [{direction}]")
scroll_tool = StructuredTool.from_function(
name="scroll",
func=scroll,
description="Scroll the page. Specify the direction (up or down) in the input",
return_direct=SHOULD_RETURN_DIRECT,
# args_schema=ScrollToolInputSchema,
handle_tool_error=_handle_error,
)
def new_tab():
return execute_action("new_tab")
new_tab_tool = StructuredTool.from_function(
name="new_tab",
func=new_tab,
description="Open a new, empty browser tab",
return_direct=SHOULD_RETURN_DIRECT,
)
# class TabFocusToolInputSchema(BaseModel):
# tab_index: int = Field()
#
# @root_validator(skip_on_failure=True)
# def validate_query(cls, values: Dict[str, Any]) -> Dict:
# tab_index = values["tab_index"]
# tab_title_str, content = my_globals.state_info['observation'].split('\n\n')
# tabs = tab_title_str.split(" | ")
#
# if tab_index not in range(len(tabs)):
# raise ToolException(
# f"tab_index {tab_index} is not valid"
# f"The available tab_index are {list(range(len(tabs)))}"
# )
# return values
def validate_tab_focus(fn_name: str, tab_index: int):
tab_title_str, content = my_globals.state_info['observation'].split('\n\n')
tabs = tab_title_str.split(" | ")
if tab_index not in range(len(tabs)):
raise ToolException(
f"Attempted to use tool {fn_name} on invalid tab_index {tab_index}."
f"The available tab_index are {list(range(len(tabs)))}"
)
def tab_focus(tab_index: int):
validate_tab_focus('tab_focus', tab_index)
return execute_action(f"tab_focus [{tab_index}]")
tab_focus_tool = StructuredTool.from_function(
name="tab_focus",
func=tab_focus,
description="Switch the browser's focus to a specific tab using its index",
return_direct=SHOULD_RETURN_DIRECT,
# args_schema=TabFocusToolInputSchema,
handle_tool_error=_handle_error,
)
def close_tab():
return execute_action("close_tab")
close_tab_tool = StructuredTool.from_function(
name="close_tab",
func=close_tab,
description="Close the currently active tab.",
return_direct=SHOULD_RETURN_DIRECT,
)
def go_back():
return execute_action("go_back")
go_back_tool = StructuredTool.from_function(
name="go_back",
func=go_back,
description="Navigate to the previously viewed page.",
return_direct=SHOULD_RETURN_DIRECT,
)
def go_forward():
return execute_action("go_forward")
go_forward_tool = StructuredTool.from_function(
name="go_forward",
func=go_forward,
description="Navigate to the next page (if a previous 'go_back' action was performed).",
return_direct=SHOULD_RETURN_DIRECT,
)
_APPROVED_DOMAINS = set(URL_MAPPINGS.values())
_APPROVED_DOMAINS.update(['docs.gitlab.com', 'experienceleague.adobe.com'])
# class GotoToolInputSchema(BaseModel):
# url: str = Field()
#
# @root_validator(skip_on_failure=True)
# def validate_query(cls, values: Dict[str, Any]) -> Dict:
# url = values["url"]
# for approved_domain in _APPROVED_DOMAINS:
# if approved_domain in url:
# return values
#
# raise ToolException(
# f"url {url} is not valid\n"
# f"The valid urls must contain any of {_APPROVED_DOMAINS}"
# )
def validate_url(fn_name: str, url: str):
for approved_domain in _APPROVED_DOMAINS:
if approved_domain in url:
return
raise ToolException(
f"Attempted to use tool {fn_name} on invalid url {url}\n"
f"The valid urls must contain any of {_APPROVED_DOMAINS}"
)
def goto(url: str):
validate_url('goto', url)
return execute_action(f"goto [{map_url_to_local(url)}]")
goto_tool = StructuredTool.from_function(
name="goto",
func=goto,
description="Navigate to a specific URL.",
return_direct=SHOULD_RETURN_DIRECT,
# args_schema=GotoToolInputSchema,
handle_tool_error=_handle_error,
)
return [stop_tool, click_tool, type_tool, hover_tool, press_tool, scroll_tool, new_tab_tool, tab_focus_tool,
close_tab_tool, go_back_tool, go_forward_tool, goto_tool], a_kwargs
# return [click_tool], a_kwargs
def init_agent(args, lm_config, tokenizer):
def _handle_parsing_error(error) -> str:
msg = my_globals.parse_error_start + str(error)
return msg
tools, agent_kwargs = init_tools_args(args, lm_config, tokenizer)
handler = LLMOutputHandler()
llm = ChatOpenAI(temperature=lm_config.gen_config["temperature"], model_name=lm_config.model, callbacks=[handler],
top_p=lm_config.gen_config["top_p"], )
    # the stop token can't be changed because the default 'observation' stop is needed
# max_tokens=lm_config.gen_config["max_tokens"],
agent_chain = initialize_agent(tools, llm, agent=AgentType.STRUCTURED_CHAT_ZERO_SHOT_REACT_DESCRIPTION,
verbose=True, max_iterations=args.max_steps, agent_kwargs=agent_kwargs,
handle_parsing_errors=_handle_parsing_error)
return agent_chain
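# Usage sketch (assumption about the surrounding harness, which is not shown here): the returned
# AgentExecutor behaves like any LangChain chain, so a single task could be driven with
#   agent_chain = init_agent(args, lm_config, tokenizer)
#   agent_chain.run(f"OBJECTIVE: {intent}\n\nObservation:\n{initial_observation}")
# where `intent` and `initial_observation` would come from resetting the browser environment.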
| [
"langchain.agents.initialize_agent",
"langchain.tools.base.ToolException",
"langchain.tools.StructuredTool.from_function",
"langchain.chat_models.ChatOpenAI"
] | [((11966, 12189), 'langchain.tools.StructuredTool.from_function', 'StructuredTool.from_function', ([], {'name': '"""click"""', 'func': 'click', 'description': '"""This action clicks on an element specified by the element_id in the input."""', 'return_direct': 'SHOULD_RETURN_DIRECT', 'handle_tool_error': '_handle_error'}), "(name='click', func=click, description=\n 'This action clicks on an element specified by the element_id in the input.'\n , return_direct=SHOULD_RETURN_DIRECT, handle_tool_error=_handle_error)\n", (11994, 12189), False, 'from langchain.tools import Tool, StructuredTool\n'), ((12762, 12867), 'langchain.tools.StructuredTool.from_function', 'StructuredTool.from_function', ([], {'name': '"""stop"""', 'func': 'stop', 'description': 'STOP_TOOL_DESC', 'return_direct': '(True)'}), "(name='stop', func=stop, description=\n STOP_TOOL_DESC, return_direct=True)\n", (12790, 12867), False, 'from langchain.tools import Tool, StructuredTool\n'), ((14816, 14995), 'langchain.tools.StructuredTool.from_function', 'StructuredTool.from_function', ([], {'name': '"""type_into_field"""', 'func': 'type_into_field', 'description': 'TYPE_TOOL_DESC', 'return_direct': 'SHOULD_RETURN_DIRECT', 'handle_tool_error': '_handle_error'}), "(name='type_into_field', func=type_into_field,\n description=TYPE_TOOL_DESC, return_direct=SHOULD_RETURN_DIRECT,\n handle_tool_error=_handle_error)\n", (14844, 14995), False, 'from langchain.tools import Tool, StructuredTool\n'), ((15246, 15433), 'langchain.tools.StructuredTool.from_function', 'StructuredTool.from_function', ([], {'name': '"""hover"""', 'func': 'hover', 'description': '"""Hover over an element with element_id."""', 'return_direct': 'SHOULD_RETURN_DIRECT', 'handle_tool_error': '_handle_error'}), "(name='hover', func=hover, description=\n 'Hover over an element with element_id.', return_direct=\n SHOULD_RETURN_DIRECT, handle_tool_error=_handle_error)\n", (15274, 15433), False, 'from langchain.tools import Tool, StructuredTool\n'), ((15677, 15868), 'langchain.tools.StructuredTool.from_function', 'StructuredTool.from_function', ([], {'name': '"""press"""', 'func': 'press', 'description': '"""Simulates the pressing of a key combination on the keyboard (e.g., Ctrl+v)."""', 'return_direct': 'SHOULD_RETURN_DIRECT'}), "(name='press', func=press, description=\n 'Simulates the pressing of a key combination on the keyboard (e.g., Ctrl+v).'\n , return_direct=SHOULD_RETURN_DIRECT)\n", (15705, 15868), False, 'from langchain.tools import Tool, StructuredTool\n'), ((16807, 17021), 'langchain.tools.StructuredTool.from_function', 'StructuredTool.from_function', ([], {'name': '"""scroll"""', 'func': 'scroll', 'description': '"""Scroll the page. Specify the direction (up or down) in the input"""', 'return_direct': 'SHOULD_RETURN_DIRECT', 'handle_tool_error': '_handle_error'}), "(name='scroll', func=scroll, description=\n 'Scroll the page. 
Specify the direction (up or down) in the input',\n return_direct=SHOULD_RETURN_DIRECT, handle_tool_error=_handle_error)\n", (16835, 17021), False, 'from langchain.tools import Tool, StructuredTool\n'), ((17198, 17342), 'langchain.tools.StructuredTool.from_function', 'StructuredTool.from_function', ([], {'name': '"""new_tab"""', 'func': 'new_tab', 'description': '"""Open a new, empty browser tab"""', 'return_direct': 'SHOULD_RETURN_DIRECT'}), "(name='new_tab', func=new_tab, description=\n 'Open a new, empty browser tab', return_direct=SHOULD_RETURN_DIRECT)\n", (17226, 17342), False, 'from langchain.tools import Tool, StructuredTool\n'), ((18689, 18905), 'langchain.tools.StructuredTool.from_function', 'StructuredTool.from_function', ([], {'name': '"""tab_focus"""', 'func': 'tab_focus', 'description': '"""Switch the browser\'s focus to a specific tab using its index"""', 'return_direct': 'SHOULD_RETURN_DIRECT', 'handle_tool_error': '_handle_error'}), '(name=\'tab_focus\', func=tab_focus, description=\n "Switch the browser\'s focus to a specific tab using its index",\n return_direct=SHOULD_RETURN_DIRECT, handle_tool_error=_handle_error)\n', (18717, 18905), False, 'from langchain.tools import Tool, StructuredTool\n'), ((19090, 19240), 'langchain.tools.StructuredTool.from_function', 'StructuredTool.from_function', ([], {'name': '"""close_tab"""', 'func': 'close_tab', 'description': '"""Close the currently active tab."""', 'return_direct': 'SHOULD_RETURN_DIRECT'}), "(name='close_tab', func=close_tab, description=\n 'Close the currently active tab.', return_direct=SHOULD_RETURN_DIRECT)\n", (19118, 19240), False, 'from langchain.tools import Tool, StructuredTool\n'), ((19366, 19525), 'langchain.tools.StructuredTool.from_function', 'StructuredTool.from_function', ([], {'name': '"""go_back"""', 'func': 'go_back', 'description': '"""Navigate to the previously viewed page."""', 'return_direct': 'SHOULD_RETURN_DIRECT'}), "(name='go_back', func=go_back, description=\n 'Navigate to the previously viewed page.', return_direct=\n SHOULD_RETURN_DIRECT)\n", (19394, 19525), False, 'from langchain.tools import Tool, StructuredTool\n'), ((19655, 19858), 'langchain.tools.StructuredTool.from_function', 'StructuredTool.from_function', ([], {'name': '"""go_forward"""', 'func': 'go_forward', 'description': '"""Navigate to the next page (if a previous \'go_back\' action was performed)."""', 'return_direct': 'SHOULD_RETURN_DIRECT'}), '(name=\'go_forward\', func=go_forward,\n description=\n "Navigate to the next page (if a previous \'go_back\' action was performed)."\n , return_direct=SHOULD_RETURN_DIRECT)\n', (19683, 19858), False, 'from langchain.tools import Tool, StructuredTool\n'), ((21080, 21253), 'langchain.tools.StructuredTool.from_function', 'StructuredTool.from_function', ([], {'name': '"""goto"""', 'func': 'goto', 'description': '"""Navigate to a specific URL."""', 'return_direct': 'SHOULD_RETURN_DIRECT', 'handle_tool_error': '_handle_error'}), "(name='goto', func=goto, description=\n 'Navigate to a specific URL.', return_direct=SHOULD_RETURN_DIRECT,\n handle_tool_error=_handle_error)\n", (21108, 21253), False, 'from langchain.tools import Tool, StructuredTool\n'), ((21870, 22020), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'temperature': "lm_config.gen_config['temperature']", 'model_name': 'lm_config.model', 'callbacks': '[handler]', 'top_p': "lm_config.gen_config['top_p']"}), "(temperature=lm_config.gen_config['temperature'], model_name=\n lm_config.model, callbacks=[handler], 
top_p=lm_config.gen_config['top_p'])\n", (21880, 22020), False, 'from langchain.chat_models import ChatOpenAI\n'), ((22177, 22396), 'langchain.agents.initialize_agent', 'initialize_agent', (['tools', 'llm'], {'agent': 'AgentType.STRUCTURED_CHAT_ZERO_SHOT_REACT_DESCRIPTION', 'verbose': '(True)', 'max_iterations': 'args.max_steps', 'agent_kwargs': 'agent_kwargs', 'handle_parsing_errors': '_handle_parsing_error'}), '(tools, llm, agent=AgentType.\n STRUCTURED_CHAT_ZERO_SHOT_REACT_DESCRIPTION, verbose=True,\n max_iterations=args.max_steps, agent_kwargs=agent_kwargs,\n handle_parsing_errors=_handle_parsing_error)\n', (22193, 22396), False, 'from langchain.agents import AgentType, initialize_agent\n'), ((7759, 7779), 'browser_env.env_config.URL_MAPPINGS.items', 'URL_MAPPINGS.items', ([], {}), '()\n', (7777, 7779), False, 'from browser_env.env_config import URL_MAPPINGS\n'), ((7991, 8011), 'browser_env.env_config.URL_MAPPINGS.items', 'URL_MAPPINGS.items', ([], {}), '()\n', (8009, 8011), False, 'from browser_env.env_config import URL_MAPPINGS\n'), ((8602, 8638), 'my_globals.trajectory.append', 'my_globals.trajectory.append', (['action'], {}), '(action)\n', (8630, 8638), False, 'import my_globals\n'), ((8663, 8818), 'browser_env.helper_functions.get_action_description', 'get_action_description', (['action', "my_globals.state_info['info']['observation_metadata']"], {'action_set_tag': 'args.action_set_tag', 'prompt_constructor': 'None'}), "(action, my_globals.state_info['info'][\n 'observation_metadata'], action_set_tag=args.action_set_tag,\n prompt_constructor=None)\n", (8685, 8818), False, 'from browser_env.helper_functions import RenderHelper, get_action_description\n'), ((8927, 9040), 'my_globals.render_helper.render', 'my_globals.render_helper.render', (['action', 'my_globals.state_info', 'my_globals.meta_data', 'args.render_screenshot'], {}), '(action, my_globals.state_info, my_globals.\n meta_data, args.render_screenshot)\n', (8958, 9040), False, 'import my_globals\n'), ((9646, 9673), 'my_globals.env.step', 'my_globals.env.step', (['action'], {}), '(action)\n', (9665, 9673), False, 'import my_globals\n'), ((9751, 9802), 'my_globals.trajectory.append', 'my_globals.trajectory.append', (['my_globals.state_info'], {}), '(my_globals.state_info)\n', (9779, 9802), False, 'import my_globals\n'), ((19920, 19941), 'browser_env.env_config.URL_MAPPINGS.values', 'URL_MAPPINGS.values', ([], {}), '()\n', (19939, 19941), False, 'from browser_env.env_config import URL_MAPPINGS\n'), ((20765, 20904), 'langchain.tools.base.ToolException', 'ToolException', (['f"""Attempted to use tool {fn_name} on invalid url {url}\nThe valid urls must contain any of {_APPROVED_DOMAINS}"""'], {}), '(\n f"""Attempted to use tool {fn_name} on invalid url {url}\nThe valid urls must contain any of {_APPROVED_DOMAINS}"""\n )\n', (20778, 20904), False, 'from langchain.tools.base import ToolException\n'), ((8234, 8273), 'browser_env.actions.create_id_based_action', 'create_id_based_action', (['parsed_response'], {}), '(parsed_response)\n', (8256, 8273), False, 'from browser_env.actions import Action, ActionParsingError, create_id_based_action, create_none_action, create_playwright_action\n'), ((14324, 14460), 'langchain.tools.base.ToolException', 'ToolException', (['f"""Attempted to use tool {fn_name} on invalid press_enter_after value {press_enter_after}. It should be 1 or 0."""'], {}), "(\n f'Attempted to use tool {fn_name} on invalid press_enter_after value {press_enter_after}. 
It should be 1 or 0.'\n )\n", (14337, 14460), False, 'from langchain.tools.base import ToolException\n'), ((16508, 16626), 'langchain.tools.base.ToolException', 'ToolException', (['f"""Attempted to use tool {fn_name} on invalid direction {direction}. It should be up or down."""'], {}), "(\n f'Attempted to use tool {fn_name} on invalid direction {direction}. It should be up or down.'\n )\n", (16521, 16626), False, 'from langchain.tools.base import ToolException\n'), ((8337, 8357), 'browser_env.actions.create_none_action', 'create_none_action', ([], {}), '()\n', (8355, 8357), False, 'from browser_env.actions import Action, ActionParsingError, create_id_based_action, create_none_action, create_playwright_action\n')] |
import os
import streamlit as st
from apikey import apikey
from langchain.memory import ConversationBufferMemory, ChatMessageHistory
from langchain.llms import OpenAI
from langchain.chains import ConversationChain
os.environ["OPENAI_API_KEY"] = apikey
def conversation_memory():
history = ChatMessageHistory()
history.add_user_message("hi!")
history.add_ai_message("whats up?")
memory = ConversationBufferMemory(chat_memory=history)
llm = OpenAI(temperature=0)
conversation = ConversationChain(llm=llm, memory=memory, verbose=True)
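    # ConversationBufferMemory keeps the full message history (seeded with the two messages
    # above) and injects it into the default ConversationChain prompt's {history} slot on
    # every call to conversation.predict().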
def predict(user_input):
response = conversation.predict(input=user_input)
return response
st.title("Conversation Memory Chatbot")
user_input = st.text_input("Enter your message:")
if user_input:
response = predict(user_input)
st.write("AI response:", response)
if __name__ == "__main__":
conversation_memory()
| [
"langchain.chains.ConversationChain",
"langchain.memory.ConversationBufferMemory",
"langchain.llms.OpenAI",
"langchain.memory.ChatMessageHistory"
] | [((447, 467), 'langchain.memory.ChatMessageHistory', 'ChatMessageHistory', ([], {}), '()\n', (465, 467), False, 'from langchain.memory import ChatMessageHistory\n'), ((558, 603), 'langchain.memory.ConversationBufferMemory', 'ConversationBufferMemory', ([], {'chat_memory': 'history'}), '(chat_memory=history)\n', (582, 603), False, 'from langchain.memory import ConversationBufferMemory\n'), ((615, 636), 'langchain.llms.OpenAI', 'OpenAI', ([], {'temperature': '(0)'}), '(temperature=0)\n', (621, 636), False, 'from langchain.llms import OpenAI\n'), ((656, 711), 'langchain.chains.ConversationChain', 'ConversationChain', ([], {'llm': 'llm', 'memory': 'memory', 'verbose': '(True)'}), '(llm=llm, memory=memory, verbose=True)\n', (673, 711), False, 'from langchain.chains import ConversationChain\n'), ((829, 868), 'streamlit.title', 'st.title', (['"""Conversation Memory Chatbot"""'], {}), "('Conversation Memory Chatbot')\n", (837, 868), True, 'import streamlit as st\n'), ((887, 923), 'streamlit.text_input', 'st.text_input', (['"""Enter your message:"""'], {}), "('Enter your message:')\n", (900, 923), True, 'import streamlit as st\n'), ((991, 1025), 'streamlit.write', 'st.write', (['"""AI response:"""', 'response'], {}), "('AI response:', response)\n", (999, 1025), True, 'import streamlit as st\n')] |
import sys
import getpass
from dotenv import load_dotenv, dotenv_values
import pandas as pd
from IPython.display import display, Markdown, Latex, HTML, JSON
import langchain
from langchain.llms import OpenAI
from langchain.prompts import PromptTemplate
from langchain.chains import LLMChain
from cmd import PROMPT
import os
from pyexpat.errors import messages
import openai
import tiktoken
sys.path.append(r"/Users/dovcohen/Documents/Projects/AI/NL2SQL")
#from .OpenAI_Func import Num_Tokens_From_String, OpenAI_Embeddings_Cost
from ChatGPT.src.lib.OpenAI_Func import Num_Tokens_From_String, OpenAI_Embeddings_Cost
from ChatGPT.src.lib.OpenAI_Func import Prompt_Cost, OpenAI_Usage_Cost
from ChatGPT.src.lib.DB_Func import execute_query, run_query
## Vector Datastore
from ChatGPT.src.lib.lib_OpenAI_Embeddings import VDS, OpenAI_Embeddings
class GenAI_NL2SQL():
def __init__(self, OPENAI_API_KEY, Model, Embedding_Model, Encoding_Base, Max_Tokens, Temperature, \
Token_Cost, DB, MYSQL_User, MYSQL_PWD, WD, VDSDB=None, VDSDB_Filename=None):
self._LLM_Model = Model
self._Embedding_Model = Embedding_Model
self._Encoding_Base = Encoding_Base
self._Max_Tokens = Max_Tokens
self._Temperature = Temperature
self._Token_Cost = Token_Cost
self._OpenAI_API_Key = OPENAI_API_KEY
self._DB = DB
self._MYSQL_Credemtals = {'User':MYSQL_User,'PWD':MYSQL_PWD}
self._WD = WD
self.Set_OpenAI_API_Key()
if VDSDB is not None:
self._VDSDB = VDSDB
self._VDS = VDS(VDSDB_Filename, Encoding_Base, Embedding_Model, Token_Cost, Max_Tokens)
self._VDS.Load_VDS_DF(Verbose=False)
def Set_OpenAI_API_Key(self):
openai.api_key = self._OpenAI_API_Key
return 1
def Print_Open_AI_Key(self):
print(self._OpenAI_API_Key)
def Print_MySQL_Keys(self):
print(self._MYSQL_Credemtals)
##############################################################################
def Prompt_Question(self, _Prompt_Template_, Inputs, Write_Template=True):
"""
"""
for i,j in Inputs.items():
Prompt = _Prompt_Template_.replace(i,j)
if Write_Template:
filename = f'{self._WD}/prompt_templates/Template_tmp.txt'
prompt_file = open(filename, 'w')
prompt_file.write(Prompt)
prompt_file.close()
return Prompt
###############################################################################
def Insert_N_Shot_Examples(self, _Prompt_Template_, N_Shot_Examples, Verbose=False):
"""
"""
# prepare Examples text
# Question = ....
# Query = ...
Examples = '\n'
for i in range(len(N_Shot_Examples['Question'])):
Examples += f"Question: {N_Shot_Examples['Question'][i]} \nQuery: {N_Shot_Examples['Query'][i]} \n\n"
# insert into template
Prompt_Template = _Prompt_Template_.replace('{EXAMPLES}', Examples)
if Verbose:
print(f'Insert_N_Shot_Examples: {Prompt_Template}')
return Prompt_Template
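    # For example (illustrative values only), with one stored pair the rendered block looks like:
    #   Question: How many vehicles were sold in 2022?
    #   Query: SELECT COUNT(*) FROM sales WHERE year = 2022;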
##############################################################################
def OpenAI_Completion(self, Prompt):
try:
#Make your OpenAI API request here
response = openai.Completion.create(
model=self._LLM_Model,
prompt=Prompt,
max_tokens=self._Max_Tokens,
temperature=self._Temperature,
top_p=1,
frequency_penalty=0,
presence_penalty=0
)
except openai.error.APIError as e:
#Handle API error here, e.g. retry or log
print(f"OpenAI API returned an API Error: {e}")
return -1
except openai.error.APIConnectionError as e:
#Handle connection error here
print(f"Failed to connect to OpenAI API: {e}")
return -1
except openai.error.RateLimitError as e:
#Handle rate limit error (we recommend using exponential backoff)
print(f"OpenAI API request exceeded rate limit: {e}")
return -1
return(response)
#############################################################################
def OpenAI_ChatCompletion(self, Messages):
try:
response = openai.ChatCompletion.create(
model=self._LLM_Model,
messages=Messages,
max_tokens=self._Max_Tokens,
temperature=self._Temperature,
top_p=1,
frequency_penalty=0,
presence_penalty=0
)
except openai.error.APIError as e:
#Handle API error here, e.g. retry or log
print(f"OpenAI API returned an API Error: {e}")
return -1
except openai.error.APIConnectionError as e:
#Handle connection error here
print(f"Failed to connect to OpenAI API: {e}")
return -1
except openai.error.RateLimitError as e:
#Handle rate limit error (we recommend using exponential backoff)
print(f"OpenAI API request exceeded rate limit: {e}")
return -1
return(response)
#############################################################################
def OpenAI_Response_Parser(self, Response, Debug=False):
if Debug:
print(f'Response {Response}')
id = Response['id']
object = Response['object']
if object == 'text_completion':
Txt = str(Response['choices'][0]['text'])
if Txt[0:7] == "\nQuery:":
Txt = Txt[7:]
elif object == 'chat.completion':
Txt = str(Response['choices'][0]['message']['content'])
else:
            print(f'Response object type {object} is unsupported')
Txt = ''
return(Txt)
##############################################################################
def Prompt_Query(self, Prompt_Template, Question = '', N_Shot_Examples = None, Verbose=False,
Debug=False):
status = 0
df = pd.DataFrame()
# Prompt for Model 3-turbo
Prompt_Template = self.Insert_N_Shot_Examples(Prompt_Template, N_Shot_Examples)
# Construct prompt
Prompt = self.Prompt_Question(Prompt_Template,{'{Question}':Question})
# Estimate input prompt cost
Cost, Tokens_Used = Prompt_Cost(Prompt, self._LLM_Model, self._Token_Cost, self._Encoding_Base)
if Verbose:
print('Input')
print(f'Total Cost: {round(Cost,3)} Tokens Used {Tokens_Used}')
# Send prompt to LLM
Response = self.OpenAI_Completion(Prompt)
if Debug:
print(f'Prompt: \n',Prompt,'\n')
print('Response \n',Response,'\n')
Cost, Tokens_Used = OpenAI_Usage_Cost(Response, self._LLM_Model, self._Token_Cost )
if Verbose:
print('Output')
print(f'Total Cost: {round(Cost,3)} Tokens Used {Tokens_Used}','\n')
# extract query from LLM response
Query = self.OpenAI_Response_Parser(Response)
return Query
##############################################################################
# Given an single input question, run the entire process
def GPT_Completion(self, Question, Prompt_Template, Correct_Query=False, Correction_Prompt=None, \
Max_Iterations=0,Verbose=False, QueryDB = False, Update_VDS=True, Prompt_Update=True):
Correct_Query_Iterations = 0
# Request Question Embedding vector
Question_Emb = self._VDS.OpenAI_Get_Embedding(Text=Question, Verbose=False)
# Search Vector Datastore for similar questions
rtn = self._VDS.Search_VDS(Question_Emb, Similarity_Func = 'Cosine', Top_n=3)
Prompt_Examples = {'Question':rtn[1], 'Query':rtn[2]}
# Construct prompt
Query = self.Prompt_Query(Prompt_Template, Question, N_Shot_Examples = Prompt_Examples, Verbose=False)
if Verbose:
print(f'Query: \n {Query} \n')
# Test query the DB -
if QueryDB:
status, df = run_query(Query = Query, Credentials = self._MYSQL_Credemtals, DB=self._DB, Verbose=False)
            # if the query was malformed, e.g. the llm hallucinated
if Correct_Query and (status == -5):
while (status == -5) and (Correct_Query_Iterations < Max_Iterations):
Correct_Query_Iterations += 1
print('Attempting to correct query syntax error')
Query = self.Prompt_Query(Correction_Prompt, Question, Verbose=False)
# Query the DB
status, df = run_query(Query = Query, Credentials = self._MYSQL_Credemtals,\
DB=self._DB, Verbose=False)
if Verbose:
print(f'Results of query: \n',df)
if Update_VDS:
if Prompt_Update:
rtn = ''
while rtn not in ('Y','N'):
print(f'Add results to Vector Datastore DB? Y or N')
rtn = input('Prompt> ')
if rtn == 'Y':
self._VDS.Insert_VDS(Question=Question, Query=Query, Metadata='',Embedding=Question_Emb)
else:
self._VDS.Insert_VDS(Question=Question, Query=Query, Metadata='',Embedding=Question_Emb)
# Return Query
return Query, df
##############################################################################
def Load_Prompt_Template(self, File=None):
if File:
try:
with open(File, 'r') as file:
Template = file.read().replace('\n', '')
Status = 0
except:
print(f'Prompt file {File} load failed ')
Status = -1
return "", Status
return Template, Status
#############################################################################
def LangChain_Initiate_LLM(self, Model='OpenAI'):
if Model=='OpenAI':
self._LLM = OpenAI(temperature=self._Temperature, model_name=self._LLM_Model, \
max_tokens=self._Max_Tokens, openai_api_key=self._OpenAI_API_Key)
return 0
else:
print('Model Unsupported')
return -1
# Langchain Completion
def LangChainCompletion(self, Prompt, Input):
chain = LLMChain(llm=self._LLM, prompt=Prompt)
return chain.run(Input)
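    # Usage sketch (names are illustrative): build a LangChain PromptTemplate and run it through
    # the chain created above, e.g.
    #   prompt = PromptTemplate(input_variables=["question"], template="Translate to SQL: {question}")
    #   sql = self.LangChainCompletion(Prompt=prompt, Input={"question": "total sales by month"})
    # LangChain_Initiate_LLM() must be called first so that self._LLM exists.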
#############################################################################
def Populate_Embeddings_from_DF_Column(self,Verbose=False):
self._VDS.Retrieve_Embeddings_DF_Column(Verbose=Verbose)
return 0
##############################################################################
# Given an single input question, run the entire process
    def GPT_ChatCompletion(self, Question, Correction_Prompt=None, Max_Iterations=0, Verbose=False, QueryDB = False,
                        Correct_Query=False, Update_VDS=True, Prompt_Update=True):
        Correct_Query_Iterations = 0
# Request Question Embedding vector
Question_Emb = self._VDS.OpenAI_Get_Embedding(Text=Question, Verbose=False)
# Search Vector Datastore for similar questions
rtn = self._VDS.Search_VDS(Question_Emb, Similarity_Func = 'Cosine', Top_n=3)
Prompt_Examples = {'Question':rtn[1], 'Query':rtn[2]}
# Construct prompt
        Query = self.Message_Query(Question, N_Shot_Examples = Prompt_Examples, Verbose=False, Debug=False)
if Query == -100:
return
if Verbose:
print(f'Query: \n {Query} \n')
# Test query the DB -
if QueryDB:
status, df = run_query(Query = Query, Credentials = self._MYSQL_Credemtals, DB=self._DB, Verbose=False)
print(f'Status {status}')
            # if the query was malformed, e.g. the llm hallucinated
if Correct_Query and (status == -5):
while (status == -5) and (Correct_Query_Iterations < Max_Iterations):
Correct_Query_Iterations += 1
print('Attempting to correct query syntax error')
Query = self.Prompt_Query(Correction_Prompt, Question, Verbose=False)
# Query the DB
status, df = run_query(Query = Query, Credentials = self._MYSQL_Credemtals,\
DB=self._DB, Verbose=False)
if Verbose:
print(f'Results of query: \n',df)
if Update_VDS:
if Prompt_Update:
rtn = ''
while rtn not in ('Y','N'):
print(f'Add results to Vector Datastore DB? Y or N')
rtn = input('Prompt> ')
if rtn == 'Y':
self._VDS.Insert_VDS(Question=Question, Query=Query, Metadata='',Embedding=Question_Emb)
else:
self._VDS.Insert_VDS(Question=Question, Query=Query, Metadata='',Embedding=Question_Emb)
# Return Query
return Query, df
##############################################################################
# Import Message Template
def Prepare_Message_Template(self, Verbose=False, Debug=False):
# Import Mesage Template file
Filename = f'{self._WD}/Message_Templates/Template_0.txt'
# Filename = self._MessageTemplate
try:
with open(Filename, 'r') as file:
Template = file.read().replace('\n', '')
Status = 0
except:
print(f'Prompt file {Filename} load failed ')
Status = -1
return "", Status
if Debug:
print(f'Template {Template}')
Messages = [{"role": "system", "content": Template}]
if Debug:
print(f'Prepare Message Template: \n {Messages} \n end \n')
return Messages, Status
##############################################################################
# Insert_N_Shot_Messages
def Insert_N_Shot_Messages(self, Messages, N_Shot_Examples, Verbose=False, Debug=False):
"""
Insert example questions and queries into message list for ChatCompletion API
"""
for i in range(len(N_Shot_Examples['Question'])):
Messages.append({"role": "system", "name":"example_user", "content": N_Shot_Examples['Question'][i]})
Messages.append({"role": "system", "name":"example_assistant", "content": N_Shot_Examples['Query'][i]})
if Debug:
print(f'Insert_N_Shot_Examples: {Messages[0]}\n')
print(f'Insert_N_Shot_Examples: {Messages[1]}\n')
print(f'Insert_N_Shot_Examples: {Messages[2]}')
return Messages
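    # After this step the ChatCompletion message list follows the usual few-shot convention, e.g.
    #   [{"role": "system", "content": <template>},
    #    {"role": "system", "name": "example_user", "content": <example question>},
    #    {"role": "system", "name": "example_assistant", "content": <example query>},
    #    ...]
    # with the real user question appended afterwards by Insert_Question().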
##############################################################################
    def Insert_Question(self, Messages, Question, Verbose=True, Debug=False):
"""
Insert question into message list for ChatCompletion API
"""
Messages.append({"role": "user", "content": Question})
if Debug:
print(f'Insert Question: \n {Messages[3]}')
return Messages
##############################################################################
def Message_Query(self, Question = '', N_Shot_Examples = None, Verbose=False, Debug=False):
"""
Message dictionary format for ChatCompletion API
"""
Status = 0
df = pd.DataFrame()
# Prepare Message Template
Messages, Status = self.Prepare_Message_Template(Verbose=True)
# Insert Example Messages
Messages = self.Insert_N_Shot_Messages(Messages, N_Shot_Examples, Verbose=True)
# Insert question
        Messages = self.Insert_Question(Messages, Question, Verbose=True)
if Debug:
print(f' Message_Query: \n {Messages}')
# Estimate input prompt cost
# Cost, Tokens_Used = Prompt_Cost(Prompt, self._LLM_Model, self._Token_Cost, self._Encoding_Base)
# if Verbose:
# print('Input')
# print(f'Total Cost: {round(Cost,3)} Tokens Used {Tokens_Used}')
# Send prompt to LLM
Response = self.OpenAI_ChatCompletion(Messages)
if Debug:
print(f'Prompt: \n',Messages,'\n')
print('Response \n',Response,'\n')
return -100
        Cost, Tokens_Used = OpenAI_Usage_Cost(Response, self._LLM_Model, self._Token_Cost )
if Verbose:
print('Output')
print(f'Total Cost: {round(Cost,3)} Tokens Used {Tokens_Used}','\n')
# extract query from LLM response
Query = self.OpenAI_Response_Parser(Response)
return Query
| [
"langchain.chains.LLMChain",
"langchain.llms.OpenAI"
] | [((394, 457), 'sys.path.append', 'sys.path.append', (['"""/Users/dovcohen/Documents/Projects/AI/NL2SQL"""'], {}), "('/Users/dovcohen/Documents/Projects/AI/NL2SQL')\n", (409, 457), False, 'import sys\n'), ((6238, 6252), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (6250, 6252), True, 'import pandas as pd\n'), ((6553, 6628), 'ChatGPT.src.lib.OpenAI_Func.Prompt_Cost', 'Prompt_Cost', (['Prompt', 'self._LLM_Model', 'self._Token_Cost', 'self._Encoding_Base'], {}), '(Prompt, self._LLM_Model, self._Token_Cost, self._Encoding_Base)\n', (6564, 6628), False, 'from ChatGPT.src.lib.OpenAI_Func import Prompt_Cost, OpenAI_Usage_Cost\n'), ((6973, 7035), 'ChatGPT.src.lib.OpenAI_Func.OpenAI_Usage_Cost', 'OpenAI_Usage_Cost', (['Response', 'self._LLM_Model', 'self._Token_Cost'], {}), '(Response, self._LLM_Model, self._Token_Cost)\n', (6990, 7035), False, 'from ChatGPT.src.lib.OpenAI_Func import Prompt_Cost, OpenAI_Usage_Cost\n'), ((10625, 10663), 'langchain.chains.LLMChain', 'LLMChain', ([], {'llm': 'self._LLM', 'prompt': 'Prompt'}), '(llm=self._LLM, prompt=Prompt)\n', (10633, 10663), False, 'from langchain.chains import LLMChain\n'), ((15713, 15727), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (15725, 15727), True, 'import pandas as pd\n'), ((1587, 1662), 'ChatGPT.src.lib.lib_OpenAI_Embeddings.VDS', 'VDS', (['VDSDB_Filename', 'Encoding_Base', 'Embedding_Model', 'Token_Cost', 'Max_Tokens'], {}), '(VDSDB_Filename, Encoding_Base, Embedding_Model, Token_Cost, Max_Tokens)\n', (1590, 1662), False, 'from ChatGPT.src.lib.lib_OpenAI_Embeddings import VDS, OpenAI_Embeddings\n'), ((3395, 3576), 'openai.Completion.create', 'openai.Completion.create', ([], {'model': 'self._LLM_Model', 'prompt': 'Prompt', 'max_tokens': 'self._Max_Tokens', 'temperature': 'self._Temperature', 'top_p': '(1)', 'frequency_penalty': '(0)', 'presence_penalty': '(0)'}), '(model=self._LLM_Model, prompt=Prompt, max_tokens=\n self._Max_Tokens, temperature=self._Temperature, top_p=1,\n frequency_penalty=0, presence_penalty=0)\n', (3419, 3576), False, 'import openai\n'), ((4427, 4615), 'openai.ChatCompletion.create', 'openai.ChatCompletion.create', ([], {'model': 'self._LLM_Model', 'messages': 'Messages', 'max_tokens': 'self._Max_Tokens', 'temperature': 'self._Temperature', 'top_p': '(1)', 'frequency_penalty': '(0)', 'presence_penalty': '(0)'}), '(model=self._LLM_Model, messages=Messages,\n max_tokens=self._Max_Tokens, temperature=self._Temperature, top_p=1,\n frequency_penalty=0, presence_penalty=0)\n', (4455, 4615), False, 'import openai\n'), ((8305, 8395), 'ChatGPT.src.lib.DB_Func.run_query', 'run_query', ([], {'Query': 'Query', 'Credentials': 'self._MYSQL_Credemtals', 'DB': 'self._DB', 'Verbose': '(False)'}), '(Query=Query, Credentials=self._MYSQL_Credemtals, DB=self._DB,\n Verbose=False)\n', (8314, 8395), False, 'from ChatGPT.src.lib.DB_Func import execute_query, run_query\n'), ((10273, 10408), 'langchain.llms.OpenAI', 'OpenAI', ([], {'temperature': 'self._Temperature', 'model_name': 'self._LLM_Model', 'max_tokens': 'self._Max_Tokens', 'openai_api_key': 'self._OpenAI_API_Key'}), '(temperature=self._Temperature, model_name=self._LLM_Model,\n max_tokens=self._Max_Tokens, openai_api_key=self._OpenAI_API_Key)\n', (10279, 10408), False, 'from langchain.llms import OpenAI\n'), ((11908, 11998), 'ChatGPT.src.lib.DB_Func.run_query', 'run_query', ([], {'Query': 'Query', 'Credentials': 'self._MYSQL_Credemtals', 'DB': 'self._DB', 'Verbose': '(False)'}), '(Query=Query, Credentials=self._MYSQL_Credemtals, DB=self._DB,\n 
Verbose=False)\n', (11917, 11998), False, 'from ChatGPT.src.lib.DB_Func import execute_query, run_query\n'), ((8869, 8959), 'ChatGPT.src.lib.DB_Func.run_query', 'run_query', ([], {'Query': 'Query', 'Credentials': 'self._MYSQL_Credemtals', 'DB': 'self._DB', 'Verbose': '(False)'}), '(Query=Query, Credentials=self._MYSQL_Credemtals, DB=self._DB,\n Verbose=False)\n', (8878, 8959), False, 'from ChatGPT.src.lib.DB_Func import execute_query, run_query\n'), ((12523, 12613), 'ChatGPT.src.lib.DB_Func.run_query', 'run_query', ([], {'Query': 'Query', 'Credentials': 'self._MYSQL_Credemtals', 'DB': 'self._DB', 'Verbose': '(False)'}), '(Query=Query, Credentials=self._MYSQL_Credemtals, DB=self._DB,\n Verbose=False)\n', (12532, 12613), False, 'from ChatGPT.src.lib.DB_Func import execute_query, run_query\n')] |
"""Web base loader class."""
import langchain_community.document_loaders as dl
from langchain.docstore.document import Document
import asyncio
import datetime
from io import StringIO
import logging
import re
import warnings
from typing import Any, AsyncGenerator, Dict, Iterator, List, Optional, Tuple, Union
import inspect
import aiohttp
import discord
import requests
import gui
from htmldate import find_date
import assets
from javascriptasync import require, eval_js, eval_js_a
from .metadataenums import MetadataDocType
from bs4 import BeautifulSoup
"""This is a special loader that makes use of Mozilla's readability library."""
from utility import Timer
def remove_links(markdown_text):
# Regular expression pattern to match masked links
# pattern = r'\[([^\]]+)\]\(([^\)]+)\)'
pattern = r"\[([^\]]+)\]\([^)]+\)"
# Replace the masked links with their text content
no_links_string = re.sub(pattern, r"\1", markdown_text)
return no_links_string
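# For example: remove_links("See [the docs](https://example.com) here") -> "See the docs here"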
async def check_readability(jsenv, html, url):
myfile = await assets.JavascriptLookup.get_full_pathas(
"readwebpage.js", "WEBJS", jsenv
)
htmls: str = str(html)
rsult = await myfile.check_read(url, htmls, timeout=45)
return rsult
async def read_article_direct(jsenv, html, url):
myfile = await assets.JavascriptLookup.get_full_pathas(
"readwebpage.js", "WEBJS", jsenv
)
timeout = 30
htmls: str = str(html)
pythonObject = {"var": htmls, "url": url}
rsult = await myfile.read_webpage_html_direct(htmls, url, timeout=45)
output = await rsult.get_a("mark")
header = await rsult.get_a("orig")
serial = await header.get_dict_a()
simplified_text = output.strip()
simplified_text = re.sub(r"(\n){4,}", "\n\n\n", simplified_text)
simplified_text = re.sub(r"\n\n", "\n", simplified_text)
simplified_text = re.sub(r" {3,}", " ", simplified_text)
simplified_text = simplified_text.replace("\t", "")
simplified_text = re.sub(r"\n+(\s*\n)*", "\n", simplified_text)
return [simplified_text, serial]
async def read_article_aw(jsenv, html, url):
now = discord.utils.utcnow()
getthread = await read_article_direct(jsenv, html, url)
result = getthread
text, header = result[0], result[1]
return text, header
def _build_metadata(soup: Any, url: str) -> dict:
"""Build metadata from BeautifulSoup output."""
metadata = {"source": url}
if title := soup.find("title"):
metadata["title"] = title.get_text()
if description := soup.find("meta", attrs={"name": "description"}):
metadata["description"] = description.get("content", "No description found.")
if html := soup.find("html"):
metadata["language"] = html.get("lang", "No language found.")
metadata["dateadded"] = datetime.datetime.utcnow().timestamp()
metadata["date"] = "None"
try:
dt = find_date(str(soup))
if dt:
metadata["date"] = dt
except Exception as e:
gui.dprint(e)
metadata["reader"] = False
return metadata
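# Illustrative shape of the returned metadata (values are made up):
#   {"source": "https://example.com/post", "title": "...", "description": "...",
#    "language": "en", "dateadded": 1700000000.0, "date": "2023-11-14", "reader": False}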
ScrapeResult = Tuple[str, BeautifulSoup, Dict[str, Any]]
class ReadableLoader(dl.WebBaseLoader):
async def _fetch_with_rate_limit(
self, url: str, semaphore: asyncio.Semaphore
) -> str:
# Extended from WebBaseLoader so that it will log the errors
# using this app's logging system.
async with semaphore:
try:
return await self._fetch(url)
except Exception as e:
await self.bot.send_error(e, title="fetching a url.", uselog=True)
if self.continue_on_failure:
self.bot.logs.warning(
f"Error fetching {url}, skipping due to"
f" continue_on_failure=True"
)
return e
self.bot.logs.exception(
f"Error fetching {url} and aborting, use continue_on_failure=True "
"to continue loading urls after encountering an error."
)
raise e
async def scrape_all(
self, urls: List[Tuple[int, str]], parser: Union[str, None] = None
) -> AsyncGenerator[Tuple[int, int, Union[ScrapeResult, Exception]], None]:
"""Fetch all urls, then return soups for all results.
        This function is an asynchronous generator."""
regular_urls = []
for e, url in urls:
regular_urls.append(url)
with Timer() as timer:
results = await self.fetch_all(regular_urls)
elapsed_time = timer.get_time()
print(f"READ: Took {elapsed_time:.4f} seconds to gather {len(urls)}.")
for i, result in enumerate(results):
if isinstance(result, Exception):
yield i, urls[i][0], result
continue
url = regular_urls[i]
if parser is None:
if url.endswith(".xml"):
parser = "xml"
else:
parser = self.default_parser
self._check_parser(parser)
souped = BeautifulSoup(result, parser)
clean_html = re.sub(
r"<script\b[^<]*(?:(?!<\/script>)<[^<]*)*<\/script>", "", result
)
print("attempting read of ", urls[i][0], "length is", len(clean_html))
readable = await check_readability(self.jsenv, clean_html, url)
if not readable:
gui.dprint("Not readable link.")
try:
with Timer() as timer:
text, header = await read_article_aw(self.jsenv, clean_html, url)
elapsed_time = timer.get_time()
print(
f"READABILITY: Took {elapsed_time:.4f} seconds to convert {urls[i][0]} to readable."
)
# YIELD THIS:
out = (remove_links(text), souped, header)
yield i, urls[i][0], out
except Exception as e:
gui.dprint(e)
text = souped.get_text(**self.bs_get_text_kwargs)
# YIELD THIS:
out = (remove_links(text), souped, None)
yield i, urls[i][0], out
# return final_results
def _scrape(self, url: str, parser: Union[str, None] = None) -> Any:
from bs4 import BeautifulSoup
if parser is None:
if url.endswith(".xml"):
parser = "xml"
else:
parser = self.default_parser
self._check_parser(parser)
html_doc = self.session.get(url, **self.requests_kwargs)
if self.raise_for_status:
html_doc.raise_for_status()
html_doc.encoding = html_doc.apparent_encoding
return BeautifulSoup(html_doc.text, parser)
def scrape(self, parser: Union[str, None] = None) -> Any:
"""Scrape data from webpage and return it in BeautifulSoup format."""
if parser is None:
parser = self.default_parser
return self._scrape(self.web_path, parser)
def lazy_load(self) -> Iterator[Document]:
"""Lazy load text from the url(s) in web_path."""
for path in self.web_paths:
soup = self._scrape(path)
text = soup.get_text(**self.bs_get_text_kwargs)
metadata = _build_metadata(soup, path)
yield Document(page_content=text, metadata=metadata)
def load(self) -> List[Document]:
"""Load text from the url(s) in web_path."""
return list(self.lazy_load())
async def aload(
self, bot
) -> AsyncGenerator[Tuple[Union[List[Document], Exception], int, int], None]:
"""Load text from the urls in web_path async into Documents."""
self.jsenv = bot.jsenv
self.bot = bot
self.continue_on_failure = True
docs, typev = [], -1
# e is the original fetched url position.
# i is the position in the self.web_paths list.
# Result is either a tuple or exception.
async for i, e, result in self.scrape_all(self.web_paths):
if isinstance(result, Exception):
yield result, e, -5
else:
try:
text, soup, header = result
metadata = _build_metadata(soup, self.web_paths[i][1])
typev = MetadataDocType.htmltext
if not "title" in metadata:
metadata["title"] = "No Title"
if header is not None:
if "byline" in header:
metadata["authors"] = header["byline"]
metadata["website"] = header.get("siteName", "siteunknown")
metadata["title"] = header.get("title")
typev = MetadataDocType.readertext
metadata["type"] = int(typev)
metadata["sum"] = "source"
yield Document(page_content=text, metadata=metadata), e, typev
except Exception as err:
gui.dprint(str(err))
yield err, e, -5
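# Usage sketch (assumptions: the caller's wiring is not shown here, and web_paths is expected to
# hold (original position, url) pairs rather than plain urls):
#   loader = ReadableLoader([])
#   loader.web_paths = [(0, "https://example.com/article")]
#   async for result, position, doc_type in loader.aload(bot):
#       ...  # result is a Document, or an Exception when fetching/parsing failed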
| [
"langchain.docstore.document.Document"
] | [((914, 951), 're.sub', 're.sub', (['pattern', '"""\\\\1"""', 'markdown_text'], {}), "(pattern, '\\\\1', markdown_text)\n", (920, 951), False, 'import re\n'), ((1742, 1788), 're.sub', 're.sub', (['"""(\\\\n){4,}"""', '"""\n\n\n"""', 'simplified_text'], {}), "('(\\\\n){4,}', '\\n\\n\\n', simplified_text)\n", (1748, 1788), False, 'import re\n'), ((1811, 1850), 're.sub', 're.sub', (['"""\\\\n\\\\n"""', '"""\n"""', 'simplified_text'], {}), "('\\\\n\\\\n', '\\n', simplified_text)\n", (1817, 1850), False, 'import re\n'), ((1872, 1910), 're.sub', 're.sub', (['""" {3,}"""', '""" """', 'simplified_text'], {}), "(' {3,}', ' ', simplified_text)\n", (1878, 1910), False, 'import re\n'), ((1990, 2037), 're.sub', 're.sub', (['"""\\\\n+(\\\\s*\\\\n)*"""', '"""\n"""', 'simplified_text'], {}), "('\\\\n+(\\\\s*\\\\n)*', '\\n', simplified_text)\n", (1996, 2037), False, 'import re\n'), ((2130, 2152), 'discord.utils.utcnow', 'discord.utils.utcnow', ([], {}), '()\n', (2150, 2152), False, 'import discord\n'), ((1048, 1121), 'assets.JavascriptLookup.get_full_pathas', 'assets.JavascriptLookup.get_full_pathas', (['"""readwebpage.js"""', '"""WEBJS"""', 'jsenv'], {}), "('readwebpage.js', 'WEBJS', jsenv)\n", (1087, 1121), False, 'import assets\n'), ((1310, 1383), 'assets.JavascriptLookup.get_full_pathas', 'assets.JavascriptLookup.get_full_pathas', (['"""readwebpage.js"""', '"""WEBJS"""', 'jsenv'], {}), "('readwebpage.js', 'WEBJS', jsenv)\n", (1349, 1383), False, 'import assets\n'), ((6806, 6842), 'bs4.BeautifulSoup', 'BeautifulSoup', (['html_doc.text', 'parser'], {}), '(html_doc.text, parser)\n', (6819, 6842), False, 'from bs4 import BeautifulSoup\n'), ((2806, 2832), 'datetime.datetime.utcnow', 'datetime.datetime.utcnow', ([], {}), '()\n', (2830, 2832), False, 'import datetime\n'), ((3002, 3015), 'gui.dprint', 'gui.dprint', (['e'], {}), '(e)\n', (3012, 3015), False, 'import gui\n'), ((4504, 4511), 'utility.Timer', 'Timer', ([], {}), '()\n', (4509, 4511), False, 'from utility import Timer\n'), ((5136, 5165), 'bs4.BeautifulSoup', 'BeautifulSoup', (['result', 'parser'], {}), '(result, parser)\n', (5149, 5165), False, 'from bs4 import BeautifulSoup\n'), ((5191, 5265), 're.sub', 're.sub', (['"""<script\\\\b[^<]*(?:(?!<\\\\/script>)<[^<]*)*<\\\\/script>"""', '""""""', 'result'], {}), "('<script\\\\b[^<]*(?:(?!<\\\\/script>)<[^<]*)*<\\\\/script>', '', result)\n", (5197, 5265), False, 'import re\n'), ((5498, 5530), 'gui.dprint', 'gui.dprint', (['"""Not readable link."""'], {}), "('Not readable link.')\n", (5508, 5530), False, 'import gui\n'), ((7414, 7460), 'langchain.docstore.document.Document', 'Document', ([], {'page_content': 'text', 'metadata': 'metadata'}), '(page_content=text, metadata=metadata)\n', (7422, 7460), False, 'from langchain.docstore.document import Document\n'), ((5569, 5576), 'utility.Timer', 'Timer', ([], {}), '()\n', (5574, 5576), False, 'from utility import Timer\n'), ((6049, 6062), 'gui.dprint', 'gui.dprint', (['e'], {}), '(e)\n', (6059, 6062), False, 'import gui\n'), ((9021, 9067), 'langchain.docstore.document.Document', 'Document', ([], {'page_content': 'text', 'metadata': 'metadata'}), '(page_content=text, metadata=metadata)\n', (9029, 9067), False, 'from langchain.docstore.document import Document\n')] |
#!/usr/bin/python3
import cgi
import time
import threading
import langchain
import openai
from langchain.tools import WikipediaQueryRun
from langchain.utilities import WikipediaAPIWrapper
from langchain.llms import OpenAI
from langchain.chains import LLMChain
from langchain.agents import load_tools
from langchain.agents import initialize_agent
from langchain.agents import AgentType
print("Content-type:text/html")
print()
form = cgi.FieldStorage()
data=form.getvalue("c")
def run(data):
wikipedia = WikipediaAPIWrapper()
    myapikey = openai.api_key = "your_openai_key"
    myllm = OpenAI(
        model='text-davinci-003',
        temperature=1,
        openai_api_key=myapikey
    )
mywikitool=load_tools(tool_names=["wikipedia"])
mywikichain=initialize_agent(
llm=myllm,
tools=mywikitool,
agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION,
verbose=True
)
    # return the agent's answer so the caller can display it
    return mywikichain.run(data)
program_thread = threading.Thread(target=run, args=(data,))
program_thread.start()
program_thread.join()
op=run(data)
#time.sleep(30)
print(op)
print()
print()
print("<form action= HTTP://'your_ip'/menu.html>")
print("<input type='submit' vaue='Back to Main menu'></form>")
| [
"langchain.agents.initialize_agent",
"langchain.utilities.WikipediaAPIWrapper",
"langchain.llms.OpenAI",
"langchain.agents.load_tools"
] | [((436, 454), 'cgi.FieldStorage', 'cgi.FieldStorage', ([], {}), '()\n', (452, 454), False, 'import cgi\n'), ((895, 934), 'threading.Thread', 'threading.Thread', ([], {'target': 'run', 'args': 'data'}), '(target=run, args=data)\n', (911, 934), False, 'import threading\n'), ((507, 528), 'langchain.utilities.WikipediaAPIWrapper', 'WikipediaAPIWrapper', ([], {}), '()\n', (526, 528), False, 'from langchain.utilities import WikipediaAPIWrapper\n'), ((582, 654), 'langchain.llms.OpenAI', 'OpenAI', ([], {'model': '"""text-davinci-003"""', 'temperature': '(1)', 'openai_api_key': 'myapikey'}), "(model='text-davinci-003', temperature=1, openai_api_key=myapikey)\n", (588, 654), False, 'from langchain.llms import OpenAI\n'), ((689, 725), 'langchain.agents.load_tools', 'load_tools', ([], {'tool_names': "['wikipedia']"}), "(tool_names=['wikipedia'])\n", (699, 725), False, 'from langchain.agents import load_tools\n'), ((739, 848), 'langchain.agents.initialize_agent', 'initialize_agent', ([], {'llm': 'myllm', 'tools': 'mywikitool', 'agent': 'AgentType.ZERO_SHOT_REACT_DESCRIPTION', 'verbose': '(True)'}), '(llm=myllm, tools=mywikitool, agent=AgentType.\n ZERO_SHOT_REACT_DESCRIPTION, verbose=True)\n', (755, 848), False, 'from langchain.agents import initialize_agent\n')] |
import streamlit as st
import os
# Utils
import time
from typing import List
# Langchain
import langchain
from pydantic import BaseModel
from vertexai.language_models import TextGenerationModel
# Vertex AI
from langchain.llms import VertexAI
from llm_experiments.utils import here
os.environ["GOOGLE_APPLICATION_CREDENTIALS"] = str(here() / 'motorway-genai-ccebd34bd403.json')
# LLM model
llm = VertexAI(
model_name="text-bison@001",
max_output_tokens=1024,
temperature=0.3,
top_p=0.8,
top_k=40,
verbose=True,
)
def form_assistant_prompt(seller_email):
return f"""Your task is to assist a customer service agent.
    Step 1: Summarise the following email between a customer who is trying to sell their vehicle and the agent. Use 2-5 bullet points.
Step 2: Estimate the customer's emotional state in no more than 3 words. This might be distressed, angry, upset, happy, excited etc.
Step 3: Output the recommended action for the agent.
    Step 4: Draft a response for the agent, in a polite but semi-formal and friendly tone (suitable for a start-up).
The email will be delimited with ####
Format your response like this:
****
Summary:
- <bullet 1>
- <bullet 2>
- <bullet 3>
****
Customer's emotional state:
<examples: distressed, angry, upset>
****
Recommended Action:
<Action>
****
Draft Response:
<Draft response>
****
Customer email below:
####
{seller_email}
####
Response:
"""
def form_validation_prompt(summary_and_context, proposed_response):
return f"""Your task is to assist a customer service agent. You will receive an email from a customer
and a proposed response from a customer support agent. You will also receive a summary, an estimate of the
customer's emotional state, the recommended action and then the draft response.
You should conduct the following actions:
Step 1: Identify if the proposed response meets the needs of the customer and is of the appropriate tone
to match the customer's emotional state.
Respond with a "Yes, this meets the customer's needs" or "No, this doesn't meet the customer's needs because <give reason>"
Step 2: Recommend a better response that meets the customer's needs more closely.
The information will be delimited with ####
Format your response like this:
****
Validation:
<response>
****
Improved response:
<response>
****
Context:
####
{summary_and_context}
####
Proposed Response:
####
{proposed_response}
####
Response:
"""
# Streamlit code starts here
st.set_page_config(page_title='Customer Email Assistant 📧🤖', layout='wide')
st.title('Customer Email Assistant 📧🤖')
seller_email = st.text_area('Seller Email',
"""Good morning Unfortunately the dealer who came to collect the car decided that they only wanted to offer me £9800 when they arrived to collect the car and i was not willing to let it go for that amount.
There was 3 men who cam to collect it and it felt really intimidating and pressured.The car has not been collected.""")
if 'result' not in st.session_state:
st.session_state['result'] = None
if st.button('Generate Response'):
    st.session_state.result = llm(form_assistant_prompt(seller_email))
if st.session_state.result is not None:
sections = st.session_state.result.split('****')
for section in sections[1:]:
title, _, rest = section.partition(':')
st.subheader(title)
st.session_state.text_box = st.text_area('', rest.strip())
st.divider()
st.subheader('Check Response:')
draft_response = st.text_area('Draft your response here')
if st.button('Validate Response'):
summary_and_context, _, _ = st.session_state.result.rpartition('****')
response = llm(form_validation_prompt(summary_and_context, draft_response))
title, _, rest = response.partition(':')
st.subheader(title)
st.text_area('', rest)
| [
"langchain.llms.VertexAI"
] | [((400, 518), 'langchain.llms.VertexAI', 'VertexAI', ([], {'model_name': '"""text-bison@001"""', 'max_output_tokens': '(1024)', 'temperature': '(0.3)', 'top_p': '(0.8)', 'top_k': '(40)', 'verbose': '(True)'}), "(model_name='text-bison@001', max_output_tokens=1024, temperature=\n 0.3, top_p=0.8, top_k=40, verbose=True)\n", (408, 518), False, 'from langchain.llms import VertexAI\n'), ((3484, 3559), 'streamlit.set_page_config', 'st.set_page_config', ([], {'page_title': '"""Customer Email Assistant 📧🤖"""', 'layout': '"""wide"""'}), "(page_title='Customer Email Assistant 📧🤖', layout='wide')\n", (3502, 3559), True, 'import streamlit as st\n'), ((3561, 3600), 'streamlit.title', 'st.title', (['"""Customer Email Assistant 📧🤖"""'], {}), "('Customer Email Assistant 📧🤖')\n", (3569, 3600), True, 'import streamlit as st\n'), ((3617, 3981), 'streamlit.text_area', 'st.text_area', (['"""Seller Email"""', '"""Good morning Unfortunately the dealer who came to collect the car decided that they only wanted to offer me ¬£9800 when they arrived to collect the car and i was not willing to let it go for that amount.\nThere was 3 men who cam to collect it and it felt really intimidating and pressured.The car has not been collected."""'], {}), '(\'Seller Email\',\n """Good morning Unfortunately the dealer who came to collect the car decided that they only wanted to offer me ¬£9800 when they arrived to collect the car and i was not willing to let it go for that amount.\nThere was 3 men who cam to collect it and it felt really intimidating and pressured.The car has not been collected."""\n )\n', (3629, 3981), True, 'import streamlit as st\n'), ((4454, 4466), 'streamlit.divider', 'st.divider', ([], {}), '()\n', (4464, 4466), True, 'import streamlit as st\n'), ((4468, 4499), 'streamlit.subheader', 'st.subheader', (['"""Check Response:"""'], {}), "('Check Response:')\n", (4480, 4499), True, 'import streamlit as st\n'), ((4517, 4557), 'streamlit.text_area', 'st.text_area', (['"""Draft your response here"""'], {}), "('Draft your response here')\n", (4529, 4557), True, 'import streamlit as st\n'), ((4562, 4592), 'streamlit.button', 'st.button', (['"""Validate Response"""'], {}), "('Validate Response')\n", (4571, 4592), True, 'import streamlit as st\n'), ((4081, 4111), 'streamlit.button', 'st.button', (['"""Generate Response"""'], {}), "('Generate Response')\n", (4090, 4111), True, 'import streamlit as st\n'), ((4239, 4276), 'streamlit.session_state.result.split', 'st.session_state.result.split', (['"""****"""'], {}), "('****')\n", (4268, 4276), True, 'import streamlit as st\n'), ((4626, 4668), 'streamlit.session_state.result.rpartition', 'st.session_state.result.rpartition', (['"""****"""'], {}), "('****')\n", (4660, 4668), True, 'import streamlit as st\n'), ((4798, 4817), 'streamlit.subheader', 'st.subheader', (['title'], {}), '(title)\n', (4810, 4817), True, 'import streamlit as st\n'), ((4822, 4844), 'streamlit.text_area', 'st.text_area', (['""""""', 'rest'], {}), "('', rest)\n", (4834, 4844), True, 'import streamlit as st\n'), ((336, 342), 'llm_experiments.utils.here', 'here', ([], {}), '()\n', (340, 342), False, 'from llm_experiments.utils import here\n'), ((4366, 4385), 'streamlit.subheader', 'st.subheader', (['title'], {}), '(title)\n', (4378, 4385), True, 'import streamlit as st\n')] |
#Multi-agent decentralized speaker selection:
'''
This notebook showcases how to implement a multi-agent simulation without a fixed schedule for who speaks when.
Instead the agents decide for themselves who speaks. We can implement this by having each agent bid to speak.
Whichever agent’s bid is the highest gets to speak.
We will show how to do this in the example below that showcases a fictitious presidential debate.
'''
import os
os.environ["OPENAI_API_KEY"] ="your_api_key"
serpapi_key="your_serpapi_key"
#Import LangChain related modules
from langchain import PromptTemplate
import re
import tenacity
from typing import List, Dict, Callable
from langchain.chat_models import ChatOpenAI
from langchain.output_parsers import RegexParser
from langchain.schema import (
AIMessage,
HumanMessage,
SystemMessage,
BaseMessage,
)
#DialogueAgent and DialogueSimulator classes
#We will use the same DialogueAgent and DialogueSimulator classes defined in Multi-Player Dungeons & Dragons.
def dec_speaker_selection():
class DialogueAgent:
def __init__(
self,
name: str,
system_message: SystemMessage,
model: ChatOpenAI,
) -> None:
self.name = name
self.system_message = system_message
self.model = model
self.prefix = f"{self.name}: "
self.reset()
def reset(self):
self.message_history = ["Here is the conversation so far."]
def send(self) -> str:
"""
Applies the chatmodel to the message history
and returns the message string
"""
message = self.model(
[
self.system_message,
HumanMessage(content="\n".join(self.message_history + [self.prefix])),
]
)
return message.content
def receive(self, name: str, message: str) -> None:
"""
Concatenates {message} spoken by {name} into message history
"""
self.message_history.append(f"{name}: {message}")
class DialogueSimulator:
def __init__(
self,
agents: List[DialogueAgent],
selection_function: Callable[[int, List[DialogueAgent]], int],
) -> None:
self.agents = agents
self._step = 0
self.select_next_speaker = selection_function
def reset(self):
for agent in self.agents:
agent.reset()
def inject(self, name: str, message: str):
"""
Initiates the conversation with a {message} from {name}
"""
for agent in self.agents:
agent.receive(name, message)
# increment time
self._step += 1
def step(self) -> tuple[str, str]:
# 1. choose the next speaker
speaker_idx = self.select_next_speaker(self._step, self.agents)
speaker = self.agents[speaker_idx]
# 2. next speaker sends message
message = speaker.send()
# 3. everyone receives message
for receiver in self.agents:
receiver.receive(speaker.name, message)
# 4. increment time
self._step += 1
return speaker.name, message
#BiddingDialogueAgent class
#We define a subclass of DialogueAgent that has a bid() method that produces a bid given the message history and the most recent message.
class BiddingDialogueAgent(DialogueAgent):
def __init__(
self,
name,
system_message: SystemMessage,
bidding_template: PromptTemplate,
model: ChatOpenAI,
) -> None:
super().__init__(name, system_message, model)
self.bidding_template = bidding_template
def bid(self) -> str:
"""
Asks the chat model to output a bid to speak
"""
prompt = PromptTemplate(
input_variables=['message_history', 'recent_message'],
template = self.bidding_template
).format(
message_history='\n'.join(self.message_history),
recent_message=self.message_history[-1])
bid_string = self.model([SystemMessage(content=prompt)]).content
return bid_string
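    # --- Hedged sketch (not part of the original notebook excerpt) ---
    # One possible selection_function for the DialogueSimulator above: ask every agent
    # for a bid, read the first integer out of the bid string, and let the highest
    # bidder speak (ties broken at random). The exact bid format is an assumption.
    import random
    def select_next_speaker(step: int, agents: List[DialogueAgent]) -> int:
        bids = []
        for agent in agents:
            bid_string = agent.bid()
            match = re.search(r"\d+", bid_string)
            bids.append(int(match.group()) if match else 0)
        max_bid = max(bids)
        candidates = [i for i, b in enumerate(bids) if b == max_bid]
        return random.choice(candidates)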
#Define participants and debate topic
character_names = ["Donald Trump", "Kanye West", "Elizabeth Warren"]
topic = "transcontinental high speed rail"
word_limit = 50
#Generate system messages
game_description = f"""Here is the topic for the presidential debate: {topic}.
The presidential candidates are: {', '.join(character_names)}."""
player_descriptor_system_message = SystemMessage(
content="You can add detail to the description of each presidential candidate.")
def generate_character_description(character_name):
character_specifier_prompt = [
player_descriptor_system_message,
HumanMessage(content=
f"""{game_description}
Please reply with a creative description of the presidential candidate, {character_name}, in {word_limit} words or less, that emphasizes their personalities.
Speak directly to {character_name}.
Do not add anything else."""
)
]
character_description = ChatOpenAI(temperature=1.0)(character_specifier_prompt).content
return character_description
def generate_character_header(character_name, character_description):
return f"""{game_description}
Your name is {character_name}.
You are a presidential candidate.
Your description is as follows: {character_description}
You are debating the topic: {topic}.
Your goal is to be as creative as possible and make the voters think you are the best candidate.
"""
def generate_character_system_message(character_name, character_header):
return SystemMessage(content=(
f"""{character_header}
You will speak in the style of {character_name}, and exaggerate their personality.
You will come up with creative ideas related to {topic}.
Do not say the same things over and over again.
Speak in the first person from the perspective of {character_name}
For describing your own body movements, wrap your description in '*'.
Do not change roles!
Do not speak from the perspective of anyone else.
Speak only from the perspective of {character_name}.
Stop speaking the moment you finish speaking from your perspective.
Never forget to keep your response to {word_limit} words!
Do not add anything else.
"""
))
character_descriptions = [generate_character_description(character_name) for character_name in character_names]
character_headers = [generate_character_header(character_name, character_description) for character_name, character_description in zip(character_names, character_descriptions)]
    character_system_messages = [generate_character_system_message(character_name, character_header) for character_name, character_header in zip(character_names, character_headers)]
for character_name, character_description, character_header, character_system_message in zip(character_names, character_descriptions, character_headers, character_system_messages):
print(f'\n\n{character_name} Description:')
print(f'\n{character_description}')
print(f'\n{character_header}')
print(f'\n{character_system_message.content}')
dec_speaker_selection() | [
"langchain.PromptTemplate",
"langchain.schema.SystemMessage",
"langchain.chat_models.ChatOpenAI",
"langchain.schema.HumanMessage"
] | [((4907, 5006), 'langchain.schema.SystemMessage', 'SystemMessage', ([], {'content': '"""You can add detail to the description of each presidential candidate."""'}), "(content=\n 'You can add detail to the description of each presidential candidate.')\n", (4920, 5006), False, 'from langchain.schema import AIMessage, HumanMessage, SystemMessage, BaseMessage\n'), ((6148, 6860), 'langchain.schema.SystemMessage', 'SystemMessage', ([], {'content': 'f"""{character_header}\n You will speak in the style of {character_name}, and exaggerate their personality.\n You will come up with creative ideas related to {topic}.\n Do not say the same things over and over again.\n Speak in the first person from the perspective of {character_name}\n For describing your own body movements, wrap your description in \'*\'.\n Do not change roles!\n Do not speak from the perspective of anyone else.\n Speak only from the perspective of {character_name}.\n Stop speaking the moment you finish speaking from your perspective.\n Never forget to keep your response to {word_limit} words!\n Do not add anything else.\n """'}), '(content=\n f"""{character_header}\n You will speak in the style of {character_name}, and exaggerate their personality.\n You will come up with creative ideas related to {topic}.\n Do not say the same things over and over again.\n Speak in the first person from the perspective of {character_name}\n For describing your own body movements, wrap your description in \'*\'.\n Do not change roles!\n Do not speak from the perspective of anyone else.\n Speak only from the perspective of {character_name}.\n Stop speaking the moment you finish speaking from your perspective.\n Never forget to keep your response to {word_limit} words!\n Do not add anything else.\n """\n )\n', (6161, 6860), False, 'from langchain.schema import AIMessage, HumanMessage, SystemMessage, BaseMessage\n'), ((5165, 5491), 'langchain.schema.HumanMessage', 'HumanMessage', ([], {'content': 'f"""{game_description}\n Please reply with a creative description of the presidential candidate, {character_name}, in {word_limit} words or less, that emphasizes their personalities. \n Speak directly to {character_name}.\n Do not add anything else."""'}), '(content=\n f"""{game_description}\n Please reply with a creative description of the presidential candidate, {character_name}, in {word_limit} words or less, that emphasizes their personalities. \n Speak directly to {character_name}.\n Do not add anything else."""\n )\n', (5177, 5491), False, 'from langchain.schema import AIMessage, HumanMessage, SystemMessage, BaseMessage\n'), ((5558, 5585), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'temperature': '(1.0)'}), '(temperature=1.0)\n', (5568, 5585), False, 'from langchain.chat_models import ChatOpenAI\n'), ((4098, 4203), 'langchain.PromptTemplate', 'PromptTemplate', ([], {'input_variables': "['message_history', 'recent_message']", 'template': 'self.bidding_template'}), "(input_variables=['message_history', 'recent_message'],\n template=self.bidding_template)\n", (4112, 4203), False, 'from langchain import PromptTemplate\n'), ((4415, 4444), 'langchain.schema.SystemMessage', 'SystemMessage', ([], {'content': 'prompt'}), '(content=prompt)\n', (4428, 4444), False, 'from langchain.schema import AIMessage, HumanMessage, SystemMessage, BaseMessage\n')] |
import torch
from transformers import BitsAndBytesConfig
from langchain import HuggingFacePipeline
from langchain import PromptTemplate, LLMChain
from pathlib import Path
import langchain
import json
import chromadb
from chromadb.config import Settings
from langchain.llms import HuggingFacePipeline
from langchain.document_loaders import TextLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.embeddings import HuggingFaceEmbeddings
from langchain.chains import RetrievalQA
from langchain.vectorstores import Chroma
from langchain.document_loaders import DirectoryLoader
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
from sentence_transformers import SentenceTransformer, util
from sklearn.metrics.pairwise import cosine_similarity
import utils
with open("config.json", "r") as content:
config = json.load(content)
with open("book_config.json", "r") as content:
book_config = json.load(content)
book = config.get("book")
output_json = "output_json"
dir_path = rf"books/{book}/part"
rootdir = rf"books/{book}/"
wiki_url = book_config.get(book).get("wikipedia_link")
person = book.replace("_", " ")
print(book)
Path(rf"{output_json}/{book}").mkdir(parents=True, exist_ok=True)
loader = DirectoryLoader(rootdir, glob="**/*.txt", loader_cls=TextLoader, loader_kwargs={"encoding": "utf-8"})
data=loader.load()
#######################
text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=200)
all_splits = text_splitter.split_documents(data)
model_name = "sentence-transformers/all-mpnet-base-v2"
model_kwargs = {"device": "cuda"}
embeddings = HuggingFaceEmbeddings(model_name=model_name, model_kwargs=model_kwargs)
#######################
vectordb = Chroma.from_documents(documents=all_splits, embedding=embeddings, persist_directory="chroma_db")
quantization_config = BitsAndBytesConfig(
load_in_4bit=True,
bnb_4bit_compute_dtype=torch.float16,
bnb_4bit_quant_type="nf4",
bnb_4bit_use_double_quant=True,
)
model_id = "mistralai/Mistral-7B-Instruct-v0.1"
model_4bit = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", quantization_config=quantization_config)
tokenizer = AutoTokenizer.from_pretrained(model_id)
pipe = pipeline(
"text-generation",
model=model_4bit,
tokenizer=tokenizer,
use_cache=True,
device_map="auto",
max_length=2000,
do_sample=True,
top_k=5,
num_return_sequences=1,
eos_token_id=tokenizer.eos_token_id,
pad_token_id=tokenizer.eos_token_id,
)
llm = HuggingFacePipeline(pipeline=pipe)
# Retrieving the top n chunks most similar to the query.
retriever = vectordb.as_retriever(search_kwargs={"k": 2})
#######################
qa = RetrievalQA.from_chain_type(
llm=llm,
chain_type="stuff",
# chain_type="map_reduce",
retriever=retriever,
verbose=True
)
def run_my_rag(qa, query):
# print(f"Query: {query}\n")
result = qa.run(query)
# print("\nResult: ", result)
return result
def format_docs(docs):
return "\n\n".join(doc.page_content for doc in docs)
wikipedia_content = utils.get_wikipedia_content(wiki_url)
generated_content = {}
for section_name, section_content in wikipedia_content.items():
query_stage_1 = f"""You are an AI assistant in writing Wikipedia articles on personalities and your task is to expand the existing content of the given Wikipedia section about the personality: "{person}" from the source documents. Can you add 3-4 most relevant sentences to the existing content? DO NOT use any external information.
Existing content: "{section_content}"
New relevant sentences: """
result = qa({"query": query_stage_1})
## Using the retrieved document as context to query the LLM
context = format_docs(result.get("source_documents", []))
    query_stage_2 = f"""You are an AI assistant in writing Wikipedia articles on personalities and your task is to expand the existing content of the given Wikipedia section about the personality: "{person}" from the given context. Using the context generate a coherent, insightful and neutral expansion of the existing content. STRICTLY do not generate more than 4 sentences. If it is not possible to expand the content from the context, say so.
context: "{context}"
Existing content: "{section_name}: {section_content}"
Expanded content: """
generated_content[section_name] = llm(query_stage_2)
# print("="*50)
# print(section_content)
# print("*"*25)
# print(llm(query_stage_2))
old_wikipedia_content = " ".join(wikipedia_content.values())
text = ""
for section_name, text in generated_content.items():
print(text)
print("="*20)
if "not possible" not in text.lower():
wikipedia_content[section_name] += text
with open(f"{output_json}/{book}/e2e_RAG_generated_content.json", "w") as content:
json.dump(generated_content, content)
updated_wikipedia_content = " ".join(wikipedia_content.values())
old_score = utils.calculate_quality(old_wikipedia_content)
updated_score = utils.calculate_quality(updated_wikipedia_content)
# Calculate the difference between corresponding values
difference_dict = {key: updated_score[key] - old_score[key] for key in old_score}
print(difference_dict)
| [
"langchain.document_loaders.DirectoryLoader",
"langchain.embeddings.HuggingFaceEmbeddings",
"langchain.chains.RetrievalQA.from_chain_type",
"langchain.text_splitter.RecursiveCharacterTextSplitter",
"langchain.llms.HuggingFacePipeline",
"langchain.vectorstores.Chroma.from_documents"
] | [((1266, 1371), 'langchain.document_loaders.DirectoryLoader', 'DirectoryLoader', (['rootdir'], {'glob': '"""**/*.txt"""', 'loader_cls': 'TextLoader', 'loader_kwargs': "{'encoding': 'utf-8'}"}), "(rootdir, glob='**/*.txt', loader_cls=TextLoader,\n loader_kwargs={'encoding': 'utf-8'})\n", (1281, 1371), False, 'from langchain.document_loaders import DirectoryLoader\n'), ((1428, 1494), 'langchain.text_splitter.RecursiveCharacterTextSplitter', 'RecursiveCharacterTextSplitter', ([], {'chunk_size': '(1000)', 'chunk_overlap': '(200)'}), '(chunk_size=1000, chunk_overlap=200)\n', (1458, 1494), False, 'from langchain.text_splitter import RecursiveCharacterTextSplitter\n'), ((1646, 1717), 'langchain.embeddings.HuggingFaceEmbeddings', 'HuggingFaceEmbeddings', ([], {'model_name': 'model_name', 'model_kwargs': 'model_kwargs'}), '(model_name=model_name, model_kwargs=model_kwargs)\n', (1667, 1717), False, 'from langchain.embeddings import HuggingFaceEmbeddings\n'), ((1754, 1854), 'langchain.vectorstores.Chroma.from_documents', 'Chroma.from_documents', ([], {'documents': 'all_splits', 'embedding': 'embeddings', 'persist_directory': '"""chroma_db"""'}), "(documents=all_splits, embedding=embeddings,\n persist_directory='chroma_db')\n", (1775, 1854), False, 'from langchain.vectorstores import Chroma\n'), ((1874, 2012), 'transformers.BitsAndBytesConfig', 'BitsAndBytesConfig', ([], {'load_in_4bit': '(True)', 'bnb_4bit_compute_dtype': 'torch.float16', 'bnb_4bit_quant_type': '"""nf4"""', 'bnb_4bit_use_double_quant': '(True)'}), "(load_in_4bit=True, bnb_4bit_compute_dtype=torch.float16,\n bnb_4bit_quant_type='nf4', bnb_4bit_use_double_quant=True)\n", (1892, 2012), False, 'from transformers import BitsAndBytesConfig\n'), ((2090, 2200), 'transformers.AutoModelForCausalLM.from_pretrained', 'AutoModelForCausalLM.from_pretrained', (['model_id'], {'device_map': '"""auto"""', 'quantization_config': 'quantization_config'}), "(model_id, device_map='auto',\n quantization_config=quantization_config)\n", (2126, 2200), False, 'from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline\n'), ((2211, 2250), 'transformers.AutoTokenizer.from_pretrained', 'AutoTokenizer.from_pretrained', (['model_id'], {}), '(model_id)\n', (2240, 2250), False, 'from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline\n'), ((2260, 2513), 'transformers.pipeline', 'pipeline', (['"""text-generation"""'], {'model': 'model_4bit', 'tokenizer': 'tokenizer', 'use_cache': '(True)', 'device_map': '"""auto"""', 'max_length': '(2000)', 'do_sample': '(True)', 'top_k': '(5)', 'num_return_sequences': '(1)', 'eos_token_id': 'tokenizer.eos_token_id', 'pad_token_id': 'tokenizer.eos_token_id'}), "('text-generation', model=model_4bit, tokenizer=tokenizer,\n use_cache=True, device_map='auto', max_length=2000, do_sample=True,\n top_k=5, num_return_sequences=1, eos_token_id=tokenizer.eos_token_id,\n pad_token_id=tokenizer.eos_token_id)\n", (2268, 2513), False, 'from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline\n'), ((2599, 2633), 'langchain.llms.HuggingFacePipeline', 'HuggingFacePipeline', ([], {'pipeline': 'pipe'}), '(pipeline=pipe)\n', (2618, 2633), False, 'from langchain.llms import HuggingFacePipeline\n'), ((2771, 2867), 'langchain.chains.RetrievalQA.from_chain_type', 'RetrievalQA.from_chain_type', ([], {'llm': 'llm', 'chain_type': '"""stuff"""', 'retriever': 'retriever', 'verbose': '(True)'}), "(llm=llm, chain_type='stuff', retriever=\n retriever, verbose=True)\n", (2798, 2867), False, 'from langchain.chains import 
RetrievalQA\n'), ((3154, 3191), 'utils.get_wikipedia_content', 'utils.get_wikipedia_content', (['wiki_url'], {}), '(wiki_url)\n', (3181, 3191), False, 'import utils\n'), ((5051, 5097), 'utils.calculate_quality', 'utils.calculate_quality', (['old_wikipedia_content'], {}), '(old_wikipedia_content)\n', (5074, 5097), False, 'import utils\n'), ((5114, 5164), 'utils.calculate_quality', 'utils.calculate_quality', (['updated_wikipedia_content'], {}), '(updated_wikipedia_content)\n', (5137, 5164), False, 'import utils\n'), ((869, 887), 'json.load', 'json.load', (['content'], {}), '(content)\n', (878, 887), False, 'import json\n'), ((954, 972), 'json.load', 'json.load', (['content'], {}), '(content)\n', (963, 972), False, 'import json\n'), ((4933, 4970), 'json.dump', 'json.dump', (['generated_content', 'content'], {}), '(generated_content, content)\n', (4942, 4970), False, 'import json\n'), ((1190, 1219), 'pathlib.Path', 'Path', (['f"""{output_json}/{book}"""'], {}), "(f'{output_json}/{book}')\n", (1194, 1219), False, 'from pathlib import Path\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Configure environment variables
import os
from LangChain_study.common import ChatParam
os.environ["OPENAI_API_KEY"] = ChatParam.OPENAI_API_KEY
os.environ["OPENAI_API_BASE"] = ChatParam.OPENAI_API_BASE
# Initialize the LLM model
import langchain
from langchain.llms import OpenAI
llm = OpenAI(model_name="text-davinci-002", n=2, best_of=2)
# Use an in-memory cache for results: responses are cached in memory, so asking the same question again reads the answer straight from the cache
from langchain.cache import InMemoryCache
langchain.llm_cache = InMemoryCache()
print(llm("Tell me a joke"))
print("----------- asking the same question again ----------------")
print(llm("Tell me a joke"))
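# Hedged example (not in the original script): the same caching pattern with a
# persistent backend. SQLiteCache stores responses on disk, so identical prompts are
# answered from the database across runs; the ".langchain.db" filename is illustrative.
from langchain.cache import SQLiteCache
langchain.llm_cache = SQLiteCache(database_path=".langchain.db")
print(llm("Tell me a joke"))  # this response is now cached in .langchain.db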
# Supported cache backends: SQLite, Redis (supports semantic caching), GPTCache (exact-match or semantic-similarity caching), Momento cache, SQLAlchemy cache | [
"langchain.cache.InMemoryCache",
"langchain.llms.OpenAI"
] | [((295, 348), 'langchain.llms.OpenAI', 'OpenAI', ([], {'model_name': '"""text-davinci-002"""', 'n': '(2)', 'best_of': '(2)'}), "(model_name='text-davinci-002', n=2, best_of=2)\n", (301, 348), False, 'from langchain.llms import OpenAI\n'), ((457, 472), 'langchain.cache.InMemoryCache', 'InMemoryCache', ([], {}), '()\n', (470, 472), False, 'from langchain.cache import InMemoryCache\n')] |
import streamlit as st
import langchain_helper as lch
st.title("🐶 Pets Name Generator")
animal_type = st.sidebar.selectbox(
"What is your pet?", ("Dog", "Cat", "Hamster", "Rat", "Snake", "Lizard", "Cow")
)
if animal_type == "Dog":
pet_color = st.sidebar.text_area(label="What color is your dog?", max_chars=15)
if animal_type == "Cat":
pet_color = st.sidebar.text_area(label="What color is your cat?", max_chars=15)
if animal_type == "Hamster":
pet_color = st.sidebar.text_area(label="What color is your hamster?", max_chars=15)
if animal_type == "Rat":
pet_color = st.sidebar.text_area(label="What color is your rat?", max_chars=25)
if animal_type == "Snake":
pet_color = st.sidebar.text_area(label="What color is your snake?", max_chars=25)
if animal_type == "Lizard":
pet_color = st.sidebar.text_area(label="What color is your lizard?", max_chars=25)
if animal_type == "Cow":
pet_color = st.sidebar.text_area(label="What color is your cow?", max_chars=25)
with st.sidebar:
openai_api_key = st.text_input(
"OpenAI API Key", key="langchain_search_api_key_openai", type="password"
)
"[Get an OpenAI API key](https://platform.openai.com/account/api-keys)"
"[View the source code](https://github.com/rishabkumar7/pets-name-langchain/tree/main)"
if pet_color:
if not openai_api_key:
st.info("Please add your OpenAI API key to continue.")
st.stop()
response = lch.generate_pet_name(animal_type, pet_color, openai_api_key)
st.text(response["pet_name"])
| [
"langchain_helper.generate_pet_name"
] | [((55, 88), 'streamlit.title', 'st.title', (['"""🐶 Pets Name Generator"""'], {}), "('🐶 Pets Name Generator')\n", (63, 88), True, 'import streamlit as st\n'), ((104, 209), 'streamlit.sidebar.selectbox', 'st.sidebar.selectbox', (['"""What is your pet?"""', "('Dog', 'Cat', 'Hamster', 'Rat', 'Snake', 'Lizard', 'Cow')"], {}), "('What is your pet?', ('Dog', 'Cat', 'Hamster', 'Rat',\n 'Snake', 'Lizard', 'Cow'))\n", (124, 209), True, 'import streamlit as st\n'), ((255, 322), 'streamlit.sidebar.text_area', 'st.sidebar.text_area', ([], {'label': '"""What color is your dog?"""', 'max_chars': '(15)'}), "(label='What color is your dog?', max_chars=15)\n", (275, 322), True, 'import streamlit as st\n'), ((365, 432), 'streamlit.sidebar.text_area', 'st.sidebar.text_area', ([], {'label': '"""What color is your cat?"""', 'max_chars': '(15)'}), "(label='What color is your cat?', max_chars=15)\n", (385, 432), True, 'import streamlit as st\n'), ((479, 550), 'streamlit.sidebar.text_area', 'st.sidebar.text_area', ([], {'label': '"""What color is your hamster?"""', 'max_chars': '(15)'}), "(label='What color is your hamster?', max_chars=15)\n", (499, 550), True, 'import streamlit as st\n'), ((593, 660), 'streamlit.sidebar.text_area', 'st.sidebar.text_area', ([], {'label': '"""What color is your rat?"""', 'max_chars': '(25)'}), "(label='What color is your rat?', max_chars=25)\n", (613, 660), True, 'import streamlit as st\n'), ((705, 774), 'streamlit.sidebar.text_area', 'st.sidebar.text_area', ([], {'label': '"""What color is your snake?"""', 'max_chars': '(25)'}), "(label='What color is your snake?', max_chars=25)\n", (725, 774), True, 'import streamlit as st\n'), ((820, 890), 'streamlit.sidebar.text_area', 'st.sidebar.text_area', ([], {'label': '"""What color is your lizard?"""', 'max_chars': '(25)'}), "(label='What color is your lizard?', max_chars=25)\n", (840, 890), True, 'import streamlit as st\n'), ((933, 1000), 'streamlit.sidebar.text_area', 'st.sidebar.text_area', ([], {'label': '"""What color is your cow?"""', 'max_chars': '(25)'}), "(label='What color is your cow?', max_chars=25)\n", (953, 1000), True, 'import streamlit as st\n'), ((1040, 1132), 'streamlit.text_input', 'st.text_input', (['"""OpenAI API Key"""'], {'key': '"""langchain_search_api_key_openai"""', 'type': '"""password"""'}), "('OpenAI API Key', key='langchain_search_api_key_openai', type\n ='password')\n", (1053, 1132), True, 'import streamlit as st\n'), ((1448, 1509), 'langchain_helper.generate_pet_name', 'lch.generate_pet_name', (['animal_type', 'pet_color', 'openai_api_key'], {}), '(animal_type, pet_color, openai_api_key)\n', (1469, 1509), True, 'import langchain_helper as lch\n'), ((1514, 1543), 'streamlit.text', 'st.text', (["response['pet_name']"], {}), "(response['pet_name'])\n", (1521, 1543), True, 'import streamlit as st\n'), ((1360, 1414), 'streamlit.info', 'st.info', (['"""Please add your OpenAI API key to continue."""'], {}), "('Please add your OpenAI API key to continue.')\n", (1367, 1414), True, 'import streamlit as st\n'), ((1423, 1432), 'streamlit.stop', 'st.stop', ([], {}), '()\n', (1430, 1432), True, 'import streamlit as st\n')] |
import pandas as pd
from langchain.document_loaders.word_document import Docx2txtLoader
# this does not work; somehow, I cannot install some of its required libs.
from langchain.document_loaders.word_document import UnstructuredWordDocumentLoader
# from langchain.text_splitter import CharacterTextSplitter
import langchain.text_splitter as ts
from langchain.embeddings import HuggingFaceInstructEmbeddings
# from langchain.vectorstores import FAISS
import re
loader = Docx2txtLoader(
"../../data/raw/6. HR.03.V3.2023. Nội quy Lao động_Review by Labor Department - Final.DOCX")
doc = loader.load()[0].page_content.lower()
doc = doc.replace("\t", "")
doc = re.sub(r"\n+", r".\n\n ", doc)
replace_mapping = { # this is for replacing some unwanted characters
    "..\n": ".\n",
    ". .\n": ".\n",
    "?.\n": "?\n",
    "? .\n": "?\n",
    ":.\n": ":\n",
    ": .\n": ":\n",
    ";.\n": ",\n",
    "; .\n": ";\n"
}
for pattern, replacement in replace_mapping.items():
doc = doc.replace(pattern, replacement)
print(doc)
# splitting into chunks
char_splt = ts.CharacterTextSplitter(separator='.', chunk_size=1000)
doc_chunks = char_splt.split_text(text=doc)
# ------ Segmenting Vietnamese
# this model is for segmenting Vietnamese text before tokenizing.
import py_vncorenlp
model = py_vncorenlp.VnCoreNLP(save_dir="../../models/VnCoreNLP")
# model.word_segment(doc) # this chunked the text into pieces already
segmented_chunks = []
for i in doc_chunks:
i = model.word_segment(i)
segmented_chunks.append(i)
segmented_chunks2=[] # now we have to rejoin each element of the list
for chunks_list in segmented_chunks:
chunks_string="\n".join(chunks_list)
segmented_chunks2.append(chunks_string)
# type(segmented_chunks2[0])
# def plotting_len_text_list(tex_list):
# len_series = pd.Series([len(i) for i in tex_list])
# len_series.plot(kind='barh', figsize=(5, len(len_series)/3))
# plotting_len_text_list(segmented_chunks2)
# ------
import torch
from transformers import AutoModel, AutoTokenizer
# from langchain.text_splitter import TextSplitter
phobert = AutoModel.from_pretrained("vinai/phobert-base-v2")
tokenizer = AutoTokenizer.from_pretrained("vinai/phobert-base-v2")
# With TensorFlow 2.0+:
# from transformers import TFAutoModel
# phobert = TFAutoModel.from_pretrained("vinai/phobert-base")
# # reading the text
# file = open('../../data/processed/6. HR.03.V3.2023. Nội quy Lao động_Review by Labor Department - Final.txt', 'r')
# input_txt = file.read()
# file.close()
token_tensors=[] # this stores the token_tensor of word tokenization for each segmented text_chunk
for chunk in segmented_chunks2:
input_ids = torch.tensor([tokenizer.encode(chunk)])
token_tensors.append(input_ids)
# vocab_dict=tokenizer.get_vocab() # get the vocab_dict of the tokenizer
# for id in input_ids[0].tolist(): # print out the index of the word after getting tokenized
# for key, val in vocab_dict.items():
# if val==id:
# print(id, key)
features_list=[]
for tensor in token_tensors[:10]:
with torch.no_grad():
        features = phobert(tensor)  # full model output (last_hidden_state + pooler_output)
    features_list.append(features.pooler_output)  # keep the sentence embedding for each chunk
# I think this works, but not with text_chunks which are too big; the model will not process them.
# with torch.no_grad():
#     features = phobert(token_tensors[0])
"""___note___
features.keys() = odict_keys(['last_hidden_state', 'pooler_output'])
features.last_hidden_state is the tensor that stores the word-embedding vectors:
if input_ids has 37 tokens, features.last_hidden_state holds 37 vectors of length 768.
features.pooler_output is the tensor that stores the sentence embedding: it contains
a single vector of length 768.
"""
len(features.last_hidden_state[0])
word_vectors_list=features.last_hidden_state[0].tolist()
for index, vector in enumerate(word_vectors_list):
print(index, len(vector))
len(features.pooler_output[0])
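# Hedged sketch (not part of the original script): with one pooler_output sentence
# embedding per chunk collected in features_list above, chunks can be compared
# directly, e.g. with torch's cosine similarity.
if len(features_list) >= 2:
    similarity = torch.nn.functional.cosine_similarity(features_list[0], features_list[1])
    print("cosine similarity between chunk 0 and chunk 1:", similarity.item())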
# # ----
# def get_text_chunks(text):
# text_splitter = CharacterTextSplitter(
# separator="\n",
# chunk_size=1000,
# chunk_overlap=200,
# length_function=len
# )
# chunks = text_splitter.split_text(text)
# return chunks
# chunks=get_text_chunks(text=input_txt.lower())
# for i in chunks:
# print()
# print(len(i), "\n", i)
# def get_vectorstore(text_chunks):
# embeddings = HuggingFaceInstructEmbeddings(model_name="vinai/phobert-base-v2")
# vectorstore = FAISS.from_texts(texts=text_chunks, embedding=embeddings)
# return vectorstore
# vector_store = get_vectorstore(text_chunks=chunks) # this does not work
# vector_store = get_vectorstore(text_chunks=segmented_chunks2[:1]) # this does not work
| [
"langchain.text_splitter.CharacterTextSplitter",
"langchain.document_loaders.word_document.Docx2txtLoader"
] | [((477, 594), 'langchain.document_loaders.word_document.Docx2txtLoader', 'Docx2txtLoader', (['"""../../data/raw/6. HR.03.V3.2023. Nội quy Lao động_Review by Labor Department - Final.DOCX"""'], {}), "(\n '../../data/raw/6. HR.03.V3.2023. Nội quy Lao động_Review by Labor Department - Final.DOCX'\n )\n", (491, 594), False, 'from langchain.document_loaders.word_document import Docx2txtLoader\n'), ((668, 699), 're.sub', 're.sub', (['"""\\\\n+"""', '""".\\\\n\\\\n """', 'doc'], {}), "('\\\\n+', '.\\\\n\\\\n ', doc)\n", (674, 699), False, 'import re\n'), ((1072, 1128), 'langchain.text_splitter.CharacterTextSplitter', 'ts.CharacterTextSplitter', ([], {'separator': '"""."""', 'chunk_size': '(1000)'}), "(separator='.', chunk_size=1000)\n", (1096, 1128), True, 'import langchain.text_splitter as ts\n'), ((1300, 1357), 'py_vncorenlp.VnCoreNLP', 'py_vncorenlp.VnCoreNLP', ([], {'save_dir': '"""../../models/VnCoreNLP"""'}), "(save_dir='../../models/VnCoreNLP')\n", (1322, 1357), False, 'import py_vncorenlp\n'), ((2099, 2149), 'transformers.AutoModel.from_pretrained', 'AutoModel.from_pretrained', (['"""vinai/phobert-base-v2"""'], {}), "('vinai/phobert-base-v2')\n", (2124, 2149), False, 'from transformers import AutoModel, AutoTokenizer\n'), ((2162, 2216), 'transformers.AutoTokenizer.from_pretrained', 'AutoTokenizer.from_pretrained', (['"""vinai/phobert-base-v2"""'], {}), "('vinai/phobert-base-v2')\n", (2191, 2216), False, 'from transformers import AutoModel, AutoTokenizer\n'), ((3071, 3086), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (3084, 3086), False, 'import torch\n')] |
"""Create a ConversationalRetrievalChain for question/answering."""
import imp
import logging
import sys
from typing import Union
from langchain.callbacks.base import BaseCallbackManager, BaseCallbackHandler
from langchain.callbacks.tracers import LangChainTracer
from langchain.chains import ConversationalRetrievalChain
from langchain.chains.llm import LLMChain
from langchain.chains.question_answering import load_qa_chain
from langchain.vectorstores.base import VectorStore
from base_bot.prompts import QA_PROMPT, REPHRASE_PROMPT
from . import config
# dynamic import
def dynamic_imp(name):
# find_module() method is used
# to find the module and return
# its description and path
try:
fp, path, desc = imp.find_module(name, [".", "base_bot/llm"])
except ImportError as e:
logging.error("module not found: " + name + " " + str(e))
try:
# load_modules loads the module
# dynamically and takes the filepath
# module and description as parameter
return imp.load_module(name, fp, path, desc)
except Exception as e:
logging.error("error loading module: " + name + " " + str(e))
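# Hedged alternative (not in the original): the "imp" module used above is deprecated
# and removed on newer Python versions. A roughly equivalent lookup with importlib
# might look like this; the "base_bot/llm/<name>.py" path is a simplification of the
# two-directory search path used above.
def dynamic_imp_importlib(name):
    import importlib.util
    spec = importlib.util.spec_from_file_location(name, f"base_bot/llm/{name}.py")
    module = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(module)
    return module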
def get_chain(
vectorstore: Union[VectorStore, any], rephrase_handler: BaseCallbackHandler, stream_handler: BaseCallbackHandler, tracing: bool = False
) -> ConversationalRetrievalChain:
_vectorstore = vectorstore() if callable(vectorstore) else vectorstore
manager = BaseCallbackManager([])
rephrase_manager = BaseCallbackManager([rephrase_handler])
stream_manager = BaseCallbackManager([stream_handler])
if tracing:
tracer = LangChainTracer()
tracer.load_default_session()
manager.add_handler(tracer)
rephrase_manager.add_handler(tracer)
stream_manager.add_handler(tracer)
llm_package = dynamic_imp(config.LLM_MODULE)
rephrase_generator_llm = llm_package.getLLM(
model=config.LLM_REPRHASING_MODEL,
temperature=config.LLM_REPHRASING_TEMPERATURE,
verbose=config.LLM_REPHRASING_VERBOSE,
callback_manager=rephrase_manager,
)
streaming_llm = llm_package.getLLM(
streaming=True,
callback_manager=stream_manager,
verbose=config.LLM_STREAMING_VERBOSE,
temperature=config.LLM_STREAMING_TEMPERATURE,
model=config.LLM_STREAMING_MODEL,
)
rephrase_generator = LLMChain(
llm=rephrase_generator_llm, prompt=REPHRASE_PROMPT, callback_manager=manager
)
doc_chain = load_qa_chain(
streaming_llm, chain_type="stuff", prompt=QA_PROMPT, callback_manager=manager
)
qa = ConversationalRetrievalChain(
retriever=_vectorstore.as_retriever(),
combine_docs_chain=doc_chain,
question_generator=rephrase_generator,
callback_manager=manager,
)
return qa
| [
"langchain.callbacks.base.BaseCallbackManager",
"langchain.chains.question_answering.load_qa_chain",
"langchain.callbacks.tracers.LangChainTracer",
"langchain.chains.llm.LLMChain"
] | [((1450, 1473), 'langchain.callbacks.base.BaseCallbackManager', 'BaseCallbackManager', (['[]'], {}), '([])\n', (1469, 1473), False, 'from langchain.callbacks.base import BaseCallbackManager, BaseCallbackHandler\n'), ((1497, 1536), 'langchain.callbacks.base.BaseCallbackManager', 'BaseCallbackManager', (['[rephrase_handler]'], {}), '([rephrase_handler])\n', (1516, 1536), False, 'from langchain.callbacks.base import BaseCallbackManager, BaseCallbackHandler\n'), ((1558, 1595), 'langchain.callbacks.base.BaseCallbackManager', 'BaseCallbackManager', (['[stream_handler]'], {}), '([stream_handler])\n', (1577, 1595), False, 'from langchain.callbacks.base import BaseCallbackManager, BaseCallbackHandler\n'), ((2381, 2471), 'langchain.chains.llm.LLMChain', 'LLMChain', ([], {'llm': 'rephrase_generator_llm', 'prompt': 'REPHRASE_PROMPT', 'callback_manager': 'manager'}), '(llm=rephrase_generator_llm, prompt=REPHRASE_PROMPT,\n callback_manager=manager)\n', (2389, 2471), False, 'from langchain.chains.llm import LLMChain\n'), ((2498, 2594), 'langchain.chains.question_answering.load_qa_chain', 'load_qa_chain', (['streaming_llm'], {'chain_type': '"""stuff"""', 'prompt': 'QA_PROMPT', 'callback_manager': 'manager'}), "(streaming_llm, chain_type='stuff', prompt=QA_PROMPT,\n callback_manager=manager)\n", (2511, 2594), False, 'from langchain.chains.question_answering import load_qa_chain\n'), ((736, 780), 'imp.find_module', 'imp.find_module', (['name', "['.', 'base_bot/llm']"], {}), "(name, ['.', 'base_bot/llm'])\n", (751, 780), False, 'import imp\n'), ((1033, 1070), 'imp.load_module', 'imp.load_module', (['name', 'fp', 'path', 'desc'], {}), '(name, fp, path, desc)\n', (1048, 1070), False, 'import imp\n'), ((1629, 1646), 'langchain.callbacks.tracers.LangChainTracer', 'LangChainTracer', ([], {}), '()\n', (1644, 1646), False, 'from langchain.callbacks.tracers import LangChainTracer\n')] |
from dotenv import load_dotenv
load_dotenv()
from langchain.pydantic_v1 import BaseModel, Field, validator
from langchain.chat_models import ChatOpenAI
from langchain.chains.openai_functions import create_structured_output_chain
from typing import Optional
from langchain.prompts import ChatPromptTemplate
import langchain
class Person(BaseModel):
"""Identifying information about a person."""
name: str = Field(..., description="The person's name")
age: int = Field(..., description="The person's age")
fav_food: Optional[str] = Field(None, description="The person's favorite food")
@validator("fav_food")
def is_valid_food(cls, field):
raise ValueError("This is not a valid food")
# If we pass in a model explicitly, we need to make sure it supports the OpenAI function-calling API.
llm = ChatOpenAI(model="gpt-4", temperature=0)
prompt = ChatPromptTemplate.from_messages(
[
(
"system",
"You are a world class algorithm for extracting information in structured formats.",
),
(
"human",
"Use the given format to extract information from the following input: {input}",
),
("human", "Tip: Make sure to answer in the correct format"),
]
)
chain = create_structured_output_chain(
Person,
llm,
prompt,
)
# result = chain.run("Sally is 13")
# print(result)
result = chain.apply(
[
{"input": "Sally is 13"},
{"input": "Angela is 13 and likes pizza"},
]
)
print(result)
| [
"langchain.pydantic_v1.Field",
"langchain.chains.openai_functions.create_structured_output_chain",
"langchain.pydantic_v1.validator",
"langchain.chat_models.ChatOpenAI",
"langchain.prompts.ChatPromptTemplate.from_messages"
] | [((32, 45), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (43, 45), False, 'from dotenv import load_dotenv\n'), ((831, 871), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'model': '"""gpt-4"""', 'temperature': '(0)'}), "(model='gpt-4', temperature=0)\n", (841, 871), False, 'from langchain.chat_models import ChatOpenAI\n'), ((881, 1183), 'langchain.prompts.ChatPromptTemplate.from_messages', 'ChatPromptTemplate.from_messages', (["[('system',\n 'You are a world class algorithm for extracting information in structured formats.'\n ), ('human',\n 'Use the given format to extract information from the following input: {input}'\n ), ('human', 'Tip: Make sure to answer in the correct format')]"], {}), "([('system',\n 'You are a world class algorithm for extracting information in structured formats.'\n ), ('human',\n 'Use the given format to extract information from the following input: {input}'\n ), ('human', 'Tip: Make sure to answer in the correct format')])\n", (913, 1183), False, 'from langchain.prompts import ChatPromptTemplate\n'), ((1282, 1333), 'langchain.chains.openai_functions.create_structured_output_chain', 'create_structured_output_chain', (['Person', 'llm', 'prompt'], {}), '(Person, llm, prompt)\n', (1312, 1333), False, 'from langchain.chains.openai_functions import create_structured_output_chain\n'), ((419, 462), 'langchain.pydantic_v1.Field', 'Field', (['...'], {'description': '"""The person\'s name"""'}), '(..., description="The person\'s name")\n', (424, 462), False, 'from langchain.pydantic_v1 import BaseModel, Field, validator\n'), ((478, 520), 'langchain.pydantic_v1.Field', 'Field', (['...'], {'description': '"""The person\'s age"""'}), '(..., description="The person\'s age")\n', (483, 520), False, 'from langchain.pydantic_v1 import BaseModel, Field, validator\n'), ((551, 604), 'langchain.pydantic_v1.Field', 'Field', (['None'], {'description': '"""The person\'s favorite food"""'}), '(None, description="The person\'s favorite food")\n', (556, 604), False, 'from langchain.pydantic_v1 import BaseModel, Field, validator\n'), ((611, 632), 'langchain.pydantic_v1.validator', 'validator', (['"""fav_food"""'], {}), "('fav_food')\n", (620, 632), False, 'from langchain.pydantic_v1 import BaseModel, Field, validator\n')] |
# Drive Imports
import yaml
import asyncio
from deferred_imports import langchain, imports_done
import webbrowser
# Global Variables
dictionaries_folder_path=""
structure_dictionary_path=""
information_dictionary_path=""
folder_dictionary_path=""
# Information Mapping
async def a_update_mapping(your_dictionary,override=False):
tasks=[]
for key in list(your_dictionary.keys()):
if override or 'mappedDate' not in your_dictionary[key] or not your_dictionary[key]['mappedDate'] or your_dictionary[key]['modifiedDate'] > your_dictionary[key]['mappedDate']:
tasks.append(a_generate_mapping(your_dictionary[key]['content'],your_dictionary[key]['title'],your_dictionary[key]['path'],key))
results=await asyncio.gather(*tasks)
for item in results:
id=list(item.keys())[0]
your_dictionary[id]['mapping'] = item[id]
your_dictionary[id]['mapped']=True
your_dictionary[id]['mappedDate'] = your_dictionary[id]['modifiedDate']
return your_dictionary
# Information Mapping
async def a_generate_mapping(content,title,parent,id):
imports_done.wait()
from langchain.chat_models import ChatOpenAI
from langchain.prompts.chat import (
ChatPromptTemplate,
SystemMessagePromptTemplate,
HumanMessagePromptTemplate,
)
# Define the templates
    system_template="I want you to give a summary of the document the user gives you as if you were describing it and what it is for; the user will also tell you the path of the parent directory of the document and its title, which you should use to understand what the summary should be. For example, a 'book summaries' document's summary should include that it is a summary of books and not simply the contents of the books."
human_template="Here is my document title-{title}, with parent directory-{parent}, and here is content of the document:\n{content}"
    system_template2="""You are to provide a useful summary and description so that someone reading your description would know what you are referring to. Use the context of the title and parent directory to understand what the description and summary should be. Think before you begin to describe the document. Break down the path into sections splitting with each \\ and think about them out loud, tell me what the meaning of each directory is in your interpretation. After you are finished thinking, give your description. Your response should follow this template:
    \"'Thoughts: 'what your thoughts are on the meaning of the document and what it is for in relation to its title and parent directory'
Description: 'your description and summary based on your thoughts so someone would know what this is for'\""""
# Create the prompt
system_message=SystemMessagePromptTemplate.from_template(system_template)
human_message=HumanMessagePromptTemplate.from_template(human_template)
system_message2=SystemMessagePromptTemplate.from_template(system_template2)
message_list=[system_message,human_message,system_message2]
chat_prompt=ChatPromptTemplate.from_messages(message_list)
# Generate the mapping
formated_prompt=chat_prompt.format_prompt(content=content, title=title, parent=parent).to_messages()
raw_string_prompt=""
for item in formated_prompt:
raw_string_prompt+=item.type+": "+item.content+"\n\n"
if len(raw_string_prompt)>9000:
model_name="gpt-3.5-turbo-16k"
else:
model_name="gpt-3.5-turbo"
chat=ChatOpenAI(model=model_name,temperature=0.3)
chat_response=await chat.agenerate([formated_prompt])
print(title+" "+parent+" "+chat_response.generations[0][0].text+"\n\n")
output_string=chat_response.generations[0][0].text
# Parse the mapping
mapped_result=""
mapped_result=(output_string).split("Description: ")[-1]
if mapped_result=="":
mapped_result=(output_string).split("Description:")[-1]
return {id:mapped_result}
# Folder Mapping
async def a_update_folder_mapping(folder_dictionary,information_dictionary,override=False):
finished_folders=[]
length_folders=len(list(folder_dictionary.keys()))
results=[]
while(length_folders>len(finished_folders)):
tasks=[]
print("finished folders: "+str(len(finished_folders))+"/"+str(length_folders))
for key in list(folder_dictionary.keys()):
#check if the key is already mapped
            if not override and 'mappedDate' in folder_dictionary[key] and folder_dictionary[key]['mappedDate'] and folder_dictionary[key]['modifiedDate'] <= folder_dictionary[key]['mappedDate']:
                # avoid re-appending on later passes of the while loop, which would
                # inflate finished_folders and end the loop before all folders are mapped
                if key not in finished_folders:
                    finished_folders.append(key)
                    print("Already done: "+key)
            else:
                print("Not done: "+key, override, 'mappedDate' in folder_dictionary[key], folder_dictionary[key].get('mappedDate'))
if key not in finished_folders:
if folder_dictionary[key]["contained_folder_ids"]==[]:
#Create task
contents=""
for file_id in folder_dictionary[key]["contained_file_ids"]:
contents+=(information_dictionary[file_id]["mapping"])+"\n"
for folder_id in folder_dictionary[key]["contained_folder_ids"]:
contents+=(folder_dictionary[folder_id]["mapping"])+"\n"
tasks.append(a_generate_folder_mapping(contents,folder_dictionary[key]['title'],folder_dictionary[key]['path'],key))
finished_folders.append(key)
else:
all_completed=True
for cf in folder_dictionary[key]["contained_folder_ids"]:
if cf not in finished_folders:
all_completed=False
if all_completed:
#Create task
contents=""
for file_id in folder_dictionary[key]["contained_file_ids"]:
contents+=(information_dictionary[file_id]["mapping"])+"\n"
for folder_id in folder_dictionary[key]["contained_folder_ids"]:
contents+=(folder_dictionary[folder_id]["mapping"])+"\n"
tasks.append(a_generate_folder_mapping(contents,folder_dictionary[key]['title'],folder_dictionary[key]['path'],key))
finished_folders.append(key)
results.append(await asyncio.gather(*tasks))
for result in results:
for item in result:
id=list(item.keys())[0]
folder_dictionary[id]['mapping'] = item[id]
folder_dictionary[id]['mapped']=True
folder_dictionary[id]['mappedDate'] = folder_dictionary[id]['modifiedDate']
return(folder_dictionary)
# Folder Mapping
async def a_generate_folder_mapping(contents,title,parent,id):
# Setup imports
imports_done.wait()
from langchain.chat_models import ChatOpenAI
from langchain.prompts.chat import (
ChatPromptTemplate,
SystemMessagePromptTemplate,
HumanMessagePromptTemplate,
)
# Define the templates
    system_template="I want you to give a summary of the folder the user gives you as if you were describing it and what it is for; the user will also tell you the path of the parent directory of the folder, the folder title, and the descriptions of the contents of the files or folders the folder contains, which you should use to understand what the description should be."
human_template="Here is my folder title-{title}, with parent directory-{parent}, and here are the contents of the folder:\n{contents}"
    system_template2="""You are to provide a useful summary and description so that someone reading your description would know what you are referring to. Use the context of the title and parent directory to understand what the description and summary should be. Think before you begin to describe the folder. Break down the path into sections splitting with each \\ and think about them out loud, tell me what the meaning of each directory is in your interpretation. After you are finished thinking, give your description. Your response should follow this template:
    \"'Thoughts: 'what your thoughts are on the meaning of the folder and what it is for in relation to its title and parent directory'
Description: 'your description and summary based on your thoughts so someone would know what this is for'\""""
# Create the prompt
system_message=SystemMessagePromptTemplate.from_template(system_template)
human_message=HumanMessagePromptTemplate.from_template(human_template)
system_message2=SystemMessagePromptTemplate.from_template(system_template2)
message_list=[system_message,human_message,system_message2]
chat_prompt=ChatPromptTemplate.from_messages(message_list)
# Get the response of the mapping for the item
formated_prompt=chat_prompt.format_prompt(contents=contents, title=title, parent=parent).to_messages()
raw_string_prompt=""
for item in formated_prompt:
raw_string_prompt+=item.type+": "+item.content+"\n\n"
if len(raw_string_prompt)>9000:
model_name="gpt-3.5-turbo-16k"
else:
model_name="gpt-3.5-turbo"
chat=ChatOpenAI(model=model_name,temperature=0.3)
chat_response=await chat.agenerate([formated_prompt])
print(title+" "+parent+" "+chat_response.generations[0][0].text+"\n\n")
output_string=chat_response.generations[0][0].text
# Parse the mapping
mapped_result=(output_string).split("Description: ")[-1]
if mapped_result=="":
mapped_result=(output_string).split("Description:")[-1]
return {id:mapped_result}
# Generate Mappings
def map(override=False):
# Setup dictionary paths
global dictionaries_folder_path, structure_dictionary_path, information_dictionary_path, folder_dictionary_path
# map information dictionary
with open(information_dictionary_path, "r") as file:
information_dict = yaml.load(file, Loader=yaml.FullLoader)
information_dict=asyncio.run(a_update_mapping(information_dict,override=override))
with open(information_dictionary_path, 'w') as outfile:
yaml.dump(information_dict, outfile)
# Map the folder dictionary
with open(information_dictionary_path, "r") as file:
information_dict = yaml.load(file, Loader=yaml.FullLoader)
with open(folder_dictionary_path, "r") as file:
folder_dictionary = yaml.load(file, Loader=yaml.FullLoader)
folder_dictionary=asyncio.run(a_update_folder_mapping(folder_dictionary,information_dict,override=False))
with open(folder_dictionary_path, 'w') as outfile:
yaml.dump(folder_dictionary, outfile)
print("Done mapping")
# Update Database
def update_vectordb(persist_directory,finish_que):
# Setup imports
imports_done.wait()
from langchain.vectorstores import Chroma
from langchain.embeddings import OpenAIEmbeddings
# Create custom Document class
class Document:
def __init__(self, page_content="",source="",dict_id="",mimeType="",title=""):
self.page_content = page_content
self.metadata={'source': source, 'id': dict_id, "mimeType":mimeType,"title":title}
def __repr__(self):
attributes = ", ".join(f"{k}={v!r}" for k, v in vars(self).items())
return f"Document({attributes})"
# Read from information dictionary
global dictionaries_folder_path, structure_dictionary_path, information_dictionary_path, folder_dictionary_path
if "information" in persist_directory:
base_dict_file_name=information_dictionary_path
elif "folder" in persist_directory:
base_dict_file_name=folder_dictionary_path
with open(base_dict_file_name) as f:
base_dict = yaml.load(f, Loader=yaml.FullLoader)
# Create list of documents
my_documents = []
for key in list(base_dict.keys()):
if base_dict[key]["path"]=="":
my_documents.append(Document(base_dict[key]["mapping"],source=base_dict[key]["path"]+"none id:"+base_dict[key]["id"]+":mimeType:"+base_dict[key]["mimeType"], dict_id=base_dict[key]["id"],mimeType=base_dict[key]["mimeType"],title=base_dict[key]["title"]))
else:
my_documents.append(Document(base_dict[key]["mapping"],source=base_dict[key]["path"]+" id:"+base_dict[key]["id"]+":mimeType:"+base_dict[key]["mimeType"], dict_id=base_dict[key]["id"],mimeType=base_dict[key]["mimeType"],title=base_dict[key]["title"]))
# Delete and regenerate the database
embedding = OpenAIEmbeddings(model="text-embedding-ada-002")
vectordb = Chroma(persist_directory=persist_directory, embedding_function=embedding)
try:
vectordb.delete_collection()
vectordb.persist()
except Exception as e:
print(e)
vectordb = Chroma.from_documents(
documents=my_documents,
embedding=embedding,
persist_directory=persist_directory
)
vectordb.persist()
vectordb = None
    # Deprecated queue usage
finish_que.put(True)
# Make Vector Database with files and folders
def combine_vectordb(persist_directory):
#Setup imports and paths
imports_done.wait()
from langchain.vectorstores import Chroma
from langchain.embeddings import OpenAIEmbeddings
global dictionaries_folder_path, structure_dictionary_path, information_dictionary_path, folder_dictionary_path
# Create custom Document class
class Document:
def __init__(self, page_content="",source="",dict_id="",mimeType="",title=""):
self.page_content = page_content
self.metadata={'source': source, 'id': dict_id, "mimeType":mimeType,"title":title}
def __repr__(self):
attributes = ", ".join(f"{k}={v!r}" for k, v in vars(self).items())
return f"Document({attributes})"
    # Dictionary to list of documents helper function
def add_documents(base_dict):
my_documents = []
for key in list(base_dict.keys()):
if base_dict[key]["path"]=="":
my_documents.append(Document(base_dict[key]["mapping"],source=base_dict[key]["path"]+"none id:"+base_dict[key]["id"]+":mimeType:"+base_dict[key]["mimeType"], dict_id=base_dict[key]["id"],mimeType=base_dict[key]["mimeType"],title=base_dict[key]["title"]))
else:
my_documents.append(Document(base_dict[key]["mapping"],source=base_dict[key]["path"]+" id:"+base_dict[key]["id"]+":mimeType:"+base_dict[key]["mimeType"], dict_id=base_dict[key]["id"],mimeType=base_dict[key]["mimeType"],title=base_dict[key]["title"]))
return(my_documents)
# Read from information and folder dictionaries
with open(information_dictionary_path) as f:
information_dict = yaml.load(f, Loader=yaml.FullLoader)
with open(folder_dictionary_path) as f:
folder_dict = yaml.load(f, Loader=yaml.FullLoader)
# Turn dictionaries into document list
my_documents=add_documents(information_dict)+add_documents(folder_dict)
# Delete and regenerate the combined_db database
embedding = OpenAIEmbeddings(model="text-embedding-ada-002")
vectordb = Chroma(persist_directory=persist_directory, embedding_function=embedding)
try:
vectordb.delete_collection()
vectordb.persist()
except Exception as e:
print(e)
vectordb = Chroma.from_documents(
documents=my_documents,
embedding=embedding,
persist_directory=persist_directory
)
vectordb.persist()
vectordb = None
print("Finished combining databases")
# Retrieve From Information
def retrieve_from_information(user_question,return_que):
# Setup imports
imports_done.wait()
from langchain.vectorstores import Chroma
from langchain.embeddings import OpenAIEmbeddings
# Get vectordb
persist_directory = 'combined_db'
embedding = OpenAIEmbeddings(model="text-embedding-ada-002")
vectordb = Chroma(persist_directory=persist_directory, embedding_function=embedding)
    # Retrieve documents
docs_and_scores = vectordb.similarity_search_with_score(user_question)
# Get docs from docs_and_scores
docs=[]
scores=[]
for item in docs_and_scores:
docs.append(item[0])
scores.append(item[1])
print(scores)
# Open the website for the first doc
open_website(docs[0])
# Pass the docs to the main app
return_que.put(docs)
# Retrieve From Folder
def retrieve_from_folder(user_question):
# Setup imports
imports_done.wait()
from langchain.vectorstores import Chroma
from langchain.embeddings import OpenAIEmbeddings
# Get vectordb
persist_directory = 'folder_db'
embedding = OpenAIEmbeddings(model="text-embedding-ada-002")
vectordb = Chroma(persist_directory=persist_directory, embedding_function=embedding)
    # Retrieve documents
docs_and_scores = vectordb.similarity_search_with_score(user_question)
# Get docs from docs_and_scores
docs=[]
scores=[]
for item in docs_and_scores:
docs.append(item[0])
scores.append(item[1])
print(scores)
# Open the website for the first doc
open_website(docs[0])
# Opens website of a document
def open_website(doc):
if doc==None:
print("No documents found")
else:
url=None
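        # Build the Google Sheets/Docs/Drive URL that matches the document's MIME type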
if "spreadsheet" in doc.metadata["mimeType"]:
url = "https://docs.google.com/spreadsheets/d/"+doc.metadata["id"]
elif "document" in doc.metadata["mimeType"]:
url = "https://docs.google.com/document/d/"+doc.metadata["id"]
elif "folder" in doc.metadata["mimeType"]:
url = "https://drive.google.com/drive/folders/"+doc.metadata["id"]
print(url)
if url != None:
webbrowser.open(url)
| [
"langchain.prompts.chat.SystemMessagePromptTemplate.from_template",
"langchain.chat_models.ChatOpenAI",
"langchain.vectorstores.Chroma.from_documents",
"langchain.prompts.chat.HumanMessagePromptTemplate.from_template",
"langchain.embeddings.OpenAIEmbeddings",
"langchain.prompts.chat.ChatPromptTemplate.from_messages",
"langchain.vectorstores.Chroma"
] | [((1100, 1119), 'deferred_imports.imports_done.wait', 'imports_done.wait', ([], {}), '()\n', (1117, 1119), False, 'from deferred_imports import langchain, imports_done\n'), ((2780, 2838), 'langchain.prompts.chat.SystemMessagePromptTemplate.from_template', 'SystemMessagePromptTemplate.from_template', (['system_template'], {}), '(system_template)\n', (2821, 2838), False, 'from langchain.prompts.chat import ChatPromptTemplate, SystemMessagePromptTemplate, HumanMessagePromptTemplate\n'), ((2857, 2913), 'langchain.prompts.chat.HumanMessagePromptTemplate.from_template', 'HumanMessagePromptTemplate.from_template', (['human_template'], {}), '(human_template)\n', (2897, 2913), False, 'from langchain.prompts.chat import ChatPromptTemplate, SystemMessagePromptTemplate, HumanMessagePromptTemplate\n'), ((2934, 2993), 'langchain.prompts.chat.SystemMessagePromptTemplate.from_template', 'SystemMessagePromptTemplate.from_template', (['system_template2'], {}), '(system_template2)\n', (2975, 2993), False, 'from langchain.prompts.chat import ChatPromptTemplate, SystemMessagePromptTemplate, HumanMessagePromptTemplate\n'), ((3074, 3120), 'langchain.prompts.chat.ChatPromptTemplate.from_messages', 'ChatPromptTemplate.from_messages', (['message_list'], {}), '(message_list)\n', (3106, 3120), False, 'from langchain.prompts.chat import ChatPromptTemplate, SystemMessagePromptTemplate, HumanMessagePromptTemplate\n'), ((3504, 3549), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'model': 'model_name', 'temperature': '(0.3)'}), '(model=model_name, temperature=0.3)\n', (3514, 3549), False, 'from langchain.chat_models import ChatOpenAI\n'), ((7004, 7023), 'deferred_imports.imports_done.wait', 'imports_done.wait', ([], {}), '()\n', (7021, 7023), False, 'from deferred_imports import langchain, imports_done\n'), ((8639, 8697), 'langchain.prompts.chat.SystemMessagePromptTemplate.from_template', 'SystemMessagePromptTemplate.from_template', (['system_template'], {}), '(system_template)\n', (8680, 8697), False, 'from langchain.prompts.chat import ChatPromptTemplate, SystemMessagePromptTemplate, HumanMessagePromptTemplate\n'), ((8716, 8772), 'langchain.prompts.chat.HumanMessagePromptTemplate.from_template', 'HumanMessagePromptTemplate.from_template', (['human_template'], {}), '(human_template)\n', (8756, 8772), False, 'from langchain.prompts.chat import ChatPromptTemplate, SystemMessagePromptTemplate, HumanMessagePromptTemplate\n'), ((8793, 8852), 'langchain.prompts.chat.SystemMessagePromptTemplate.from_template', 'SystemMessagePromptTemplate.from_template', (['system_template2'], {}), '(system_template2)\n', (8834, 8852), False, 'from langchain.prompts.chat import ChatPromptTemplate, SystemMessagePromptTemplate, HumanMessagePromptTemplate\n'), ((8933, 8979), 'langchain.prompts.chat.ChatPromptTemplate.from_messages', 'ChatPromptTemplate.from_messages', (['message_list'], {}), '(message_list)\n', (8965, 8979), False, 'from langchain.prompts.chat import ChatPromptTemplate, SystemMessagePromptTemplate, HumanMessagePromptTemplate\n'), ((9388, 9433), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'model': 'model_name', 'temperature': '(0.3)'}), '(model=model_name, temperature=0.3)\n', (9398, 9433), False, 'from langchain.chat_models import ChatOpenAI\n'), ((10991, 11010), 'deferred_imports.imports_done.wait', 'imports_done.wait', ([], {}), '()\n', (11008, 11010), False, 'from deferred_imports import langchain, imports_done\n'), ((12729, 12777), 'langchain.embeddings.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], 
{'model': '"""text-embedding-ada-002"""'}), "(model='text-embedding-ada-002')\n", (12745, 12777), False, 'from langchain.embeddings import OpenAIEmbeddings\n'), ((12793, 12866), 'langchain.vectorstores.Chroma', 'Chroma', ([], {'persist_directory': 'persist_directory', 'embedding_function': 'embedding'}), '(persist_directory=persist_directory, embedding_function=embedding)\n', (12799, 12866), False, 'from langchain.vectorstores import Chroma\n'), ((12999, 13106), 'langchain.vectorstores.Chroma.from_documents', 'Chroma.from_documents', ([], {'documents': 'my_documents', 'embedding': 'embedding', 'persist_directory': 'persist_directory'}), '(documents=my_documents, embedding=embedding,\n persist_directory=persist_directory)\n', (13020, 13106), False, 'from langchain.vectorstores import Chroma\n'), ((13356, 13375), 'deferred_imports.imports_done.wait', 'imports_done.wait', ([], {}), '()\n', (13373, 13375), False, 'from deferred_imports import langchain, imports_done\n'), ((15282, 15330), 'langchain.embeddings.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {'model': '"""text-embedding-ada-002"""'}), "(model='text-embedding-ada-002')\n", (15298, 15330), False, 'from langchain.embeddings import OpenAIEmbeddings\n'), ((15346, 15419), 'langchain.vectorstores.Chroma', 'Chroma', ([], {'persist_directory': 'persist_directory', 'embedding_function': 'embedding'}), '(persist_directory=persist_directory, embedding_function=embedding)\n', (15352, 15419), False, 'from langchain.vectorstores import Chroma\n'), ((15552, 15659), 'langchain.vectorstores.Chroma.from_documents', 'Chroma.from_documents', ([], {'documents': 'my_documents', 'embedding': 'embedding', 'persist_directory': 'persist_directory'}), '(documents=my_documents, embedding=embedding,\n persist_directory=persist_directory)\n', (15573, 15659), False, 'from langchain.vectorstores import Chroma\n'), ((15885, 15904), 'deferred_imports.imports_done.wait', 'imports_done.wait', ([], {}), '()\n', (15902, 15904), False, 'from deferred_imports import langchain, imports_done\n'), ((16079, 16127), 'langchain.embeddings.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {'model': '"""text-embedding-ada-002"""'}), "(model='text-embedding-ada-002')\n", (16095, 16127), False, 'from langchain.embeddings import OpenAIEmbeddings\n'), ((16143, 16216), 'langchain.vectorstores.Chroma', 'Chroma', ([], {'persist_directory': 'persist_directory', 'embedding_function': 'embedding'}), '(persist_directory=persist_directory, embedding_function=embedding)\n', (16149, 16216), False, 'from langchain.vectorstores import Chroma\n'), ((16717, 16736), 'deferred_imports.imports_done.wait', 'imports_done.wait', ([], {}), '()\n', (16734, 16736), False, 'from deferred_imports import langchain, imports_done\n'), ((16909, 16957), 'langchain.embeddings.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {'model': '"""text-embedding-ada-002"""'}), "(model='text-embedding-ada-002')\n", (16925, 16957), False, 'from langchain.embeddings import OpenAIEmbeddings\n'), ((16973, 17046), 'langchain.vectorstores.Chroma', 'Chroma', ([], {'persist_directory': 'persist_directory', 'embedding_function': 'embedding'}), '(persist_directory=persist_directory, embedding_function=embedding)\n', (16979, 17046), False, 'from langchain.vectorstores import Chroma\n'), ((735, 757), 'asyncio.gather', 'asyncio.gather', (['*tasks'], {}), '(*tasks)\n', (749, 757), False, 'import asyncio\n'), ((10144, 10183), 'yaml.load', 'yaml.load', (['file'], {'Loader': 'yaml.FullLoader'}), '(file, Loader=yaml.FullLoader)\n', (10153, 10183), False, 
'import yaml\n'), ((10339, 10375), 'yaml.dump', 'yaml.dump', (['information_dict', 'outfile'], {}), '(information_dict, outfile)\n', (10348, 10375), False, 'import yaml\n'), ((10497, 10536), 'yaml.load', 'yaml.load', (['file'], {'Loader': 'yaml.FullLoader'}), '(file, Loader=yaml.FullLoader)\n', (10506, 10536), False, 'import yaml\n'), ((10617, 10656), 'yaml.load', 'yaml.load', (['file'], {'Loader': 'yaml.FullLoader'}), '(file, Loader=yaml.FullLoader)\n', (10626, 10656), False, 'import yaml\n'), ((10830, 10867), 'yaml.dump', 'yaml.dump', (['folder_dictionary', 'outfile'], {}), '(folder_dictionary, outfile)\n', (10839, 10867), False, 'import yaml\n'), ((11954, 11990), 'yaml.load', 'yaml.load', (['f'], {'Loader': 'yaml.FullLoader'}), '(f, Loader=yaml.FullLoader)\n', (11963, 11990), False, 'import yaml\n'), ((14948, 14984), 'yaml.load', 'yaml.load', (['f'], {'Loader': 'yaml.FullLoader'}), '(f, Loader=yaml.FullLoader)\n', (14957, 14984), False, 'import yaml\n'), ((15051, 15087), 'yaml.load', 'yaml.load', (['f'], {'Loader': 'yaml.FullLoader'}), '(f, Loader=yaml.FullLoader)\n', (15060, 15087), False, 'import yaml\n'), ((17972, 17992), 'webbrowser.open', 'webbrowser.open', (['url'], {}), '(url)\n', (17987, 17992), False, 'import webbrowser\n'), ((6557, 6579), 'asyncio.gather', 'asyncio.gather', (['*tasks'], {}), '(*tasks)\n', (6571, 6579), False, 'import asyncio\n')] |
import tensorflow
import dotenv
import transformers
from tensorflow import keras
from dotenv import find_dotenv, load_dotenv
from transformers import pipeline
import langchain
from langchain import PromptTemplate, LLMChain, OpenAI
import requests
import os
import openai
import streamlit as st
HUGGINGFACEHUB_API_TOKEN = os.getenv("HUGGINGFACEHUB_API_TOKEN")
openai.api_key = os.getenv("OPENAI_API_KEY")
load_dotenv(find_dotenv())
# img to text
def img2text(data):
image_to_text = pipeline("image-to-text", model="Salesforce/blip-image-captioning-base")
text = image_to_text(data)[0]["generated_text"]
print(text)
return text
# llm
def generate_story(scenario):
template = """
    You are a storyteller;
    You can generate a short story based on a simple narrative; the story should be no more than 20 words.
CONTEXT: {scenario}
STORY:
"""
prompt = PromptTemplate(template=template, input_variables=["scenario"])
story_llm = LLMChain(llm=OpenAI(model_name='gpt-3.5-turbo', temperature=1),
prompt=prompt, verbose=True)
story = story_llm.predict(scenario=scenario)
print(story)
return story
# txt to speech
def text2speech(message):
API_URL = "https://api-inference.huggingface.co/models/espnet/kan-bayashi_ljspeech_vits"
headers = {"Authorization": f"Bearer {HUGGINGFACEHUB_API_TOKEN}"}
payload = { "inputs": message }
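    # Call the Hugging Face Inference API; the response body is the raw FLAC audio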
response = requests.post(API_URL, headers=headers, json=payload)
with open('audio.flac', 'wb') as file:
file.write(response.content)
def main():
st.set_page_config(page_title="Image to Speech", page_icon="🤗")
uploaded_file = st.file_uploader("Choose an image...", type="jpg")
if uploaded_file is not None:
print(uploaded_file)
bytes_data = uploaded_file.getvalue()
with open(uploaded_file.name, 'wb') as file:
file.write(bytes_data)
st.image(bytes_data, caption='Uploaded Image.', use_column_width=True)
scenario = img2text(uploaded_file.name)
story = generate_story(scenario)
text2speech(story)
with st.expander("scenario"):
st.write(scenario)
with st.expander("story"):
st.write(story)
st.audio('audio.flac')
if __name__ == '__main__':
main()
| [
"langchain.OpenAI",
"langchain.PromptTemplate"
] | [((324, 361), 'os.getenv', 'os.getenv', (['"""HUGGINGFACEHUB_API_TOKEN"""'], {}), "('HUGGINGFACEHUB_API_TOKEN')\n", (333, 361), False, 'import os\n'), ((379, 406), 'os.getenv', 'os.getenv', (['"""OPENAI_API_KEY"""'], {}), "('OPENAI_API_KEY')\n", (388, 406), False, 'import os\n'), ((420, 433), 'dotenv.find_dotenv', 'find_dotenv', ([], {}), '()\n', (431, 433), False, 'from dotenv import find_dotenv, load_dotenv\n'), ((491, 563), 'transformers.pipeline', 'pipeline', (['"""image-to-text"""'], {'model': '"""Salesforce/blip-image-captioning-base"""'}), "('image-to-text', model='Salesforce/blip-image-captioning-base')\n", (499, 563), False, 'from transformers import pipeline\n'), ((905, 968), 'langchain.PromptTemplate', 'PromptTemplate', ([], {'template': 'template', 'input_variables': "['scenario']"}), "(template=template, input_variables=['scenario'])\n", (919, 968), False, 'from langchain import PromptTemplate, LLMChain, OpenAI\n'), ((1449, 1502), 'requests.post', 'requests.post', (['API_URL'], {'headers': 'headers', 'json': 'payload'}), '(API_URL, headers=headers, json=payload)\n', (1462, 1502), False, 'import requests\n'), ((1601, 1664), 'streamlit.set_page_config', 'st.set_page_config', ([], {'page_title': '"""Image to Speech"""', 'page_icon': '"""🤗"""'}), "(page_title='Image to Speech', page_icon='🤗')\n", (1619, 1664), True, 'import streamlit as st\n'), ((1685, 1735), 'streamlit.file_uploader', 'st.file_uploader', (['"""Choose an image..."""'], {'type': '"""jpg"""'}), "('Choose an image...', type='jpg')\n", (1701, 1735), True, 'import streamlit as st\n'), ((1942, 2012), 'streamlit.image', 'st.image', (['bytes_data'], {'caption': '"""Uploaded Image."""', 'use_column_width': '(True)'}), "(bytes_data, caption='Uploaded Image.', use_column_width=True)\n", (1950, 2012), True, 'import streamlit as st\n'), ((2287, 2309), 'streamlit.audio', 'st.audio', (['"""audio.flac"""'], {}), "('audio.flac')\n", (2295, 2309), True, 'import streamlit as st\n'), ((999, 1048), 'langchain.OpenAI', 'OpenAI', ([], {'model_name': '"""gpt-3.5-turbo"""', 'temperature': '(1)'}), "(model_name='gpt-3.5-turbo', temperature=1)\n", (1005, 1048), False, 'from langchain import PromptTemplate, LLMChain, OpenAI\n'), ((2151, 2174), 'streamlit.expander', 'st.expander', (['"""scenario"""'], {}), "('scenario')\n", (2162, 2174), True, 'import streamlit as st\n'), ((2188, 2206), 'streamlit.write', 'st.write', (['scenario'], {}), '(scenario)\n', (2196, 2206), True, 'import streamlit as st\n'), ((2220, 2240), 'streamlit.expander', 'st.expander', (['"""story"""'], {}), "('story')\n", (2231, 2240), True, 'import streamlit as st\n'), ((2254, 2269), 'streamlit.write', 'st.write', (['story'], {}), '(story)\n', (2262, 2269), True, 'import streamlit as st\n')] |
"""Chat with a model using LangChain"""
from dotenv import load_dotenv
from langchain_core.messages import HumanMessage, SystemMessage
from genai import Client, Credentials
from genai.extensions.langchain.chat_llm import LangChainChatInterface
from genai.schema import (
DecodingMethod,
ModerationHAP,
ModerationParameters,
TextGenerationParameters,
TextGenerationReturnOptions,
)
# make sure you have a .env file under genai root with
# GENAI_KEY=<your-genai-key>
# GENAI_API=<genai-api-endpoint> (optional) DEFAULT_API = "https://bam-api.res.ibm.com"
load_dotenv()
def heading(text: str) -> str:
"""Helper function for centering text."""
return "\n" + f" {text} ".center(80, "=") + "\n"
llm = LangChainChatInterface(
client=Client(credentials=Credentials.from_env()),
model_id="meta-llama/llama-2-70b-chat",
parameters=TextGenerationParameters(
decoding_method=DecodingMethod.SAMPLE,
max_new_tokens=100,
min_new_tokens=10,
temperature=0.5,
top_k=50,
top_p=1,
return_options=TextGenerationReturnOptions(input_text=False, input_tokens=True),
),
moderations=ModerationParameters(
        # Threshold is set to a very low level to flag everything (testing purposes)
# or set to True to enable HAP with default settings
hap=ModerationHAP(input=True, output=False, threshold=0.01)
),
)
print(heading("Start conversation with langchain"))
prompt = "Describe what is Python in one sentence."
print(f"Request: {prompt}")
result = llm.generate(
messages=[
[
SystemMessage(
content="""You are a helpful, respectful and honest assistant.
Always answer as helpfully as possible, while being safe.
Your answers should not include any harmful, unethical, racist, sexist, toxic, dangerous, or illegal content.
Please ensure that your responses are socially unbiased and positive in nature. If a question does not make
any sense, or is not factually coherent, explain why instead of answering something incorrectly.
If you don't know the answer to a question, please don't share false information.
""",
),
HumanMessage(content=prompt),
]
],
)
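# The generation metadata carries a conversation ID that can be reused to continue the chat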
conversation_id = result.generations[0][0].generation_info["meta"]["conversation_id"]
print(f"New conversation with ID '{conversation_id}' has been created!")
print(f"Response: {result.generations[0][0].text}")
print(result.llm_output)
print(result.generations[0][0].generation_info)
print(heading("Continue conversation with langchain"))
prompt = "Show me some simple code example."
print(f"Request: {prompt}")
result = llm.generate(
messages=[[HumanMessage(content=prompt)]], conversation_id=conversation_id, use_conversation_parameters=True
)
print(f"Response: {result.generations[0][0].text}")
| [
"langchain_core.messages.HumanMessage",
"langchain_core.messages.SystemMessage"
] | [((576, 589), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (587, 589), False, 'from dotenv import load_dotenv\n'), ((784, 806), 'genai.Credentials.from_env', 'Credentials.from_env', ([], {}), '()\n', (804, 806), False, 'from genai import Client, Credentials\n'), ((1079, 1143), 'genai.schema.TextGenerationReturnOptions', 'TextGenerationReturnOptions', ([], {'input_text': '(False)', 'input_tokens': '(True)'}), '(input_text=False, input_tokens=True)\n', (1106, 1143), False, 'from genai.schema import DecodingMethod, ModerationHAP, ModerationParameters, TextGenerationParameters, TextGenerationReturnOptions\n'), ((1346, 1401), 'genai.schema.ModerationHAP', 'ModerationHAP', ([], {'input': '(True)', 'output': '(False)', 'threshold': '(0.01)'}), '(input=True, output=False, threshold=0.01)\n', (1359, 1401), False, 'from genai.schema import DecodingMethod, ModerationHAP, ModerationParameters, TextGenerationParameters, TextGenerationReturnOptions\n'), ((1604, 2150), 'langchain_core.messages.SystemMessage', 'SystemMessage', ([], {'content': '"""You are a helpful, respectful and honest assistant.\nAlways answer as helpfully as possible, while being safe.\nYour answers should not include any harmful, unethical, racist, sexist, toxic, dangerous, or illegal content.\nPlease ensure that your responses are socially unbiased and positive in nature. If a question does not make\nany sense, or is not factually coherent, explain why instead of answering something incorrectly.\nIf you don\'t know the answer to a question, please don\'t share false information.\n"""'}), '(content=\n """You are a helpful, respectful and honest assistant.\nAlways answer as helpfully as possible, while being safe.\nYour answers should not include any harmful, unethical, racist, sexist, toxic, dangerous, or illegal content.\nPlease ensure that your responses are socially unbiased and positive in nature. If a question does not make\nany sense, or is not factually coherent, explain why instead of answering something incorrectly.\nIf you don\'t know the answer to a question, please don\'t share false information.\n"""\n )\n', (1617, 2150), False, 'from langchain_core.messages import HumanMessage, SystemMessage\n'), ((2185, 2213), 'langchain_core.messages.HumanMessage', 'HumanMessage', ([], {'content': 'prompt'}), '(content=prompt)\n', (2197, 2213), False, 'from langchain_core.messages import HumanMessage, SystemMessage\n'), ((2685, 2713), 'langchain_core.messages.HumanMessage', 'HumanMessage', ([], {'content': 'prompt'}), '(content=prompt)\n', (2697, 2713), False, 'from langchain_core.messages import HumanMessage, SystemMessage\n')] |
from langchain.vectorstores import Chroma
from langchain.embeddings import OpenAIEmbeddings
from langchain.chains import RetrievalQA
from langchain.chat_models import ChatOpenAI
from redundant_filter_retriever import RedundantFilterRetriever
from dotenv import load_dotenv
import langchain
langchain.debug = True
load_dotenv()
chat = ChatOpenAI()
embeddings = OpenAIEmbeddings()
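# Load the Chroma vector store persisted in the "emb" directory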
db = Chroma(
persist_directory="emb",
embedding_function=embeddings
)
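# Custom retriever from the local redundant_filter_retriever module; by its name, it filters out redundant near-duplicate matches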
retriever = RedundantFilterRetriever(
embeddings=embeddings,
chroma=db
)
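# "stuff" chain type: all retrieved documents are inserted into a single prompt for the LLM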
chain = RetrievalQA.from_chain_type(
llm=chat,
retriever=retriever,
chain_type="stuff"
)
result = chain.run("What is an interesting fact about the English language?")
print(result)
| [
"langchain.vectorstores.Chroma",
"langchain.embeddings.OpenAIEmbeddings",
"langchain.chains.RetrievalQA.from_chain_type",
"langchain.chat_models.ChatOpenAI"
] | [((315, 328), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (326, 328), False, 'from dotenv import load_dotenv\n'), ((337, 349), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {}), '()\n', (347, 349), False, 'from langchain.chat_models import ChatOpenAI\n'), ((363, 381), 'langchain.embeddings.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {}), '()\n', (379, 381), False, 'from langchain.embeddings import OpenAIEmbeddings\n'), ((387, 449), 'langchain.vectorstores.Chroma', 'Chroma', ([], {'persist_directory': '"""emb"""', 'embedding_function': 'embeddings'}), "(persist_directory='emb', embedding_function=embeddings)\n", (393, 449), False, 'from langchain.vectorstores import Chroma\n'), ((472, 530), 'redundant_filter_retriever.RedundantFilterRetriever', 'RedundantFilterRetriever', ([], {'embeddings': 'embeddings', 'chroma': 'db'}), '(embeddings=embeddings, chroma=db)\n', (496, 530), False, 'from redundant_filter_retriever import RedundantFilterRetriever\n'), ((550, 628), 'langchain.chains.RetrievalQA.from_chain_type', 'RetrievalQA.from_chain_type', ([], {'llm': 'chat', 'retriever': 'retriever', 'chain_type': '"""stuff"""'}), "(llm=chat, retriever=retriever, chain_type='stuff')\n", (577, 628), False, 'from langchain.chains import RetrievalQA\n')] |
from llama_index.llms import LangChainLLM
from langchain.llms import Clarifai
from llama_index import VectorStoreIndex, SummaryIndex
from llama_index import ServiceContext
from llama_index import Document
from llama_index import SimpleDirectoryReader
from llama_index.prompts import PromptTemplate
from llama_index.chat_engine.simple import SimpleChatEngine
from llama_index import LLMPredictor
from pypdf import PdfReader
import streamlit as st
# Your PAT (Personal Access Token) can be found in the portal under Authentication
PAT = st.secrets.cf_pat
# Specify the correct user_id/app_id pairings
# Since you're making inferences outside your app's scope
USER_ID = 'anthropic'
APP_ID = 'completion'
# Change these to whatever model and text URL you want to use
MODEL_ID = 'claude-v2'
MODEL_VERSION_ID = 'ad16eda6ac054796bf9f348ab6733c72'
class LLM_Hander:
def __init__(self):
pat = PAT
Cfllm = LangChainLLM(llm = Clarifai(
pat=pat, user_id=USER_ID, app_id=APP_ID, model_id=MODEL_ID
))
self.documents = []
self.chat_template = """
        I want you to act as a teacher's assistant. Your task is to help teachers cut down on their boring tasks.
Follow the instructions given by the teacher and produce the outputs that the teacher is requesting.
Retrieve information from the vector index about current input within the service context.
Also follow the below rules when you respond.
1. Avoid generating single quotes in your response.
        2. DO NOT return a JSON response.
        3. Use easy-to-understand English.
"""
self.service_context = ServiceContext.from_defaults(llm_predictor=LLMPredictor(llm=Cfllm),
system_prompt=self.chat_template)
self.query_index = None
self.summary_index = None
def loadData(self, files):
texts = []
for f in files:
st.session_state.fnames.append(f.name)
reader = PdfReader(f)
text = ""
            pages = reader.pages
            for page in pages:
text += f'Page number: {reader.get_page_number(page)} \n Content : {page.extract_text()}'
texts.append(text)
self.documents = [Document(text=t) for t in texts]
def prepareIndex(self):
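        # Build two indexes over the same documents: a vector index for targeted queries and a summary index for chat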
self.query_index = VectorStoreIndex.from_documents(self.documents, service_context=self.service_context)
self.summary_index = SummaryIndex.from_documents(self.documents, service_context=self.service_context)
def prepareBots(self):
self.query_engine = self.query_index.as_query_engine()
self.chat_engine = self.summary_index.as_chat_engine(chat_mode='react', verbose=True)
def firePrompt(self, role, prompt):
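        # Students get the retrieval-based query engine; teachers get the conversational ReAct chat engine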
if role == 'student':
try:
return self.query_engine.query(prompt)
except:
                return 'some error occurred. please retry'
if role == 'teacher':
try:
return self.chat_engine.chat(prompt)
except:
                return 'some error occurred. please retry'
def clearHistory(self):
self.chat_engine.reset()
| [
"langchain.llms.Clarifai"
] | [((2438, 2528), 'llama_index.VectorStoreIndex.from_documents', 'VectorStoreIndex.from_documents', (['self.documents'], {'service_context': 'self.service_context'}), '(self.documents, service_context=self.\n service_context)\n', (2469, 2528), False, 'from llama_index import VectorStoreIndex, SummaryIndex\n'), ((2553, 2639), 'llama_index.SummaryIndex.from_documents', 'SummaryIndex.from_documents', (['self.documents'], {'service_context': 'self.service_context'}), '(self.documents, service_context=self.\n service_context)\n', (2580, 2639), False, 'from llama_index import VectorStoreIndex, SummaryIndex\n'), ((2016, 2054), 'streamlit.session_state.fnames.append', 'st.session_state.fnames.append', (['f.name'], {}), '(f.name)\n', (2046, 2054), True, 'import streamlit as st\n'), ((2076, 2088), 'pypdf.PdfReader', 'PdfReader', (['f'], {}), '(f)\n', (2085, 2088), False, 'from pypdf import PdfReader\n'), ((2345, 2361), 'llama_index.Document', 'Document', ([], {'text': 't'}), '(text=t)\n', (2353, 2361), False, 'from llama_index import Document\n'), ((942, 1010), 'langchain.llms.Clarifai', 'Clarifai', ([], {'pat': 'pat', 'user_id': 'USER_ID', 'app_id': 'APP_ID', 'model_id': 'MODEL_ID'}), '(pat=pat, user_id=USER_ID, app_id=APP_ID, model_id=MODEL_ID)\n', (950, 1010), False, 'from langchain.llms import Clarifai\n'), ((1735, 1758), 'llama_index.LLMPredictor', 'LLMPredictor', ([], {'llm': 'Cfllm'}), '(llm=Cfllm)\n', (1747, 1758), False, 'from llama_index import LLMPredictor\n')] |
# Import necessary modules for Hubspot API integration and Langchain analysis
import hubspot
import langchain
def retrieve_and_store_feedback(appointment_id):
"""
Function to retrieve and store customer feedback and ratings from the Hubspot App.
Input: appointment_id - ID of the appointment for which feedback is to be retrieved
Output: Returns the feedback and rating data in the form of a dictionary
"""
hubspot_api_key = "<your_hubspot_api_key>"
hubspot_client = hubspot.Client(api_key=hubspot_api_key)
# Use the Hubspot API to retrieve feedback and rating data for the given appointment ID
feedback_data = hubspot_client.crm.feedback.survey_results_api.get_all(
object_id=appointment_id,
object_type="APPOINTMENT"
)
# Store this data in Langchain analysis to generate personalized appointment reminders
langchain_client = langchain.Client(api_key="<your_langchain_api_key>")
langchain_client.store_feedback_data(appointment_id, feedback_data)
return feedback_data
| [
"langchain.Client"
] | [((507, 546), 'hubspot.Client', 'hubspot.Client', ([], {'api_key': 'hubspot_api_key'}), '(api_key=hubspot_api_key)\n', (521, 546), False, 'import hubspot\n'), ((966, 1018), 'langchain.Client', 'langchain.Client', ([], {'api_key': '"""<your_langchain_api_key>"""'}), "(api_key='<your_langchain_api_key>')\n", (982, 1018), False, 'import langchain\n')] |