import os
import json
import bcrypt
import pandas as pd
import numpy as np
from typing import List
from pathlib import Path
from langchain_openai import ChatOpenAI, OpenAI
from langchain.schema.runnable.config import RunnableConfig
from langchain.schema import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
from langchain.agents import AgentExecutor
from langchain.agents.agent_types import AgentType
#from langchain_experimental.agents.agent_toolkits import create_pandas_dataframe_agent, create_csv_agent
from langchain_community.agent_toolkits import create_sql_agent
from langchain_community.utilities import SQLDatabase
from sqlalchemy import create_engine
import chainlit as cl
from chainlit.input_widget import TextInput, Select, Switch, Slider
from deep_translator import GoogleTranslator
from IPython.display import display
from surveycaa import surveyCaa, surveyRh
from literalai import LiteralClient

literal_client = LiteralClient(api_key=os.getenv("LITERAL_API_KEY"))
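
# CHAINLIT_AUTH_LOGIN is expected to hold a JSON list of credential entries with
# "ident", "pwd" and "role" keys, as read by auth_callback below. The values here
# are illustrative placeholders only:
# [{"ident": "admin", "pwd": "secret", "role": "admindatapcc"}]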

@cl.password_auth_callback
def auth_callback(username: str, password: str):
    auth = json.loads(os.environ['CHAINLIT_AUTH_LOGIN'])
    entry = next((d for d in auth if d['ident'] == username), None)
    if entry is None:
        return None
    ident = entry['ident']
    pwd = entry['pwd']
    resultLogAdmin = bcrypt.checkpw(username.encode('utf-8'), bcrypt.hashpw(ident.encode('utf-8'), bcrypt.gensalt()))
    resultPwdAdmin = bcrypt.checkpw(password.encode('utf-8'), bcrypt.hashpw(pwd.encode('utf-8'), bcrypt.gensalt()))
    resultRole = entry['role']
    if resultLogAdmin and resultPwdAdmin and resultRole == "admindatapcc":
        return cl.User(
            identifier=ident + " : 🧑💼 Admin Datapcc", metadata={"role": "admin", "provider": "credentials"}
        )
    elif resultLogAdmin and resultPwdAdmin and resultRole == "userdatapcc":
        return cl.User(
            identifier=ident + " : 🧑🎓 User Datapcc", metadata={"role": "user", "provider": "credentials"}
        )
    return None

def create_agent(filename: str):
    """
    Create an agent that provides access to a large language model (LLM).
    Args:
        filename: the path of the CSV file that contains the data.
    Returns:
        An agent that can access and use the LLM.
    """
    # Create the chat model; the API key is read from the OPENAI_API_KEY environment variable.
    llm = ChatOpenAI(temperature=0, model="gpt-4o-2024-08-06")
    # Read the CSV file into a Pandas DataFrame and load it into a per-session SQLite database.
    if cl.user_session.get("createdb") is None:
        df = pd.read_csv(filename)
        engine = create_engine("sqlite:///sphinx" + str(cl.user_session.get("id")) + ".db")
        df.to_sql("sphinx" + str(cl.user_session.get("id")), engine, index=False)
        db = SQLDatabase(engine=engine)
        cl.user_session.set("createdb", "OK")
        cl.user_session.set("db", db)
    else:
        db = cl.user_session.get("db")
    # Create a SQL agent.
    #e.g agent_executor.invoke({"input": "Quel est le nombre de chargé d'affaires en agencement par entreprise?"})
    return create_sql_agent(llm, db=db, agent_type="openai-tools", verbose=False)
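
# Illustrative usage sketch (file name and question are placeholders, not from the app):
# agent = create_agent("./survey.csv")
# result = await agent.ainvoke({"input": "Combien de lignes contient la table ?"})
# print(result["output"])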

def query_agent(agent, query):
    """
    Query an agent and return the response as a string.
    Args:
        agent: The agent to query.
        query: The query to ask the agent.
    Returns:
        The response from the agent as a string.
    """
    prompt = (
        """
        For the following query, if it requires drawing a table, reply as follows:
        {"table": {"columns": ["column1", "column2", ...], "data": [[value1, value2, ...], [value1, value2, ...], ...]}}
        If the query requires creating a bar chart, reply as follows:
        {"bar": {"columns": ["A", "B", "C", ...], "data": [25, 24, 10, ...]}}
        If the query requires creating a line chart, reply as follows:
        {"line": {"columns": ["A", "B", "C", ...], "data": [25, 24, 10, ...]}}
        There can only be two types of chart, "bar" and "line".
        If it is just asking a question that requires neither, reply as follows:
        {"answer": "answer"}
        Example:
        {"answer": "The title with the highest rating is 'Gilead'"}
        If you do not know the answer, reply as follows:
        {"answer": "I do not know."}
        Return all output as a string.
        All strings in the "columns" list and the data list should be in double quotes.
        For example: {"columns": ["title", "ratings_count"], "data": [["Gilead", 361], ["Spider's Web", 5164]]}
        Let's think step by step.
        Below is the query.
        Query:
        """
        + query
    )
    # Run the prompt through the agent.
    response = agent.invoke(prompt)
    # Convert the response to a string.
    return str(response)
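
# Illustrative example (the question is a placeholder): query_agent(agent, "Combien de répondants par entreprise ?")
# is expected to return a JSON-formatted string such as '{"answer": "..."}' or '{"table": {...}}'.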

def decode_response(response: str) -> dict:
    """Convert the JSON string returned by the model into a dictionary.
    Args:
        response (str): response from the model
    Returns:
        dict: dictionary with response data
    """
    return json.loads(response)
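
# Example: decode_response('{"answer": "I do not know."}') returns {"answer": "I do not know."}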

def write_response(response_dict: dict):
    """
    Extract the plain-text answer from a decoded agent response.
    Args:
        response_dict: The decoded response from the agent.
    Returns:
        The value of the "answer" key.
    """
    return response_dict["answer"]
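
# Example: write_response({"answer": "The title with the highest rating is 'Gilead'"})
# returns "The title with the highest rating is 'Gilead'".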

async def on_action(action):
    # Write the action value to a one-column CSV-style text file.
    content = [action.value]
    arrayContent = np.array(content)
    df = pd.DataFrame(arrayContent)
    df.to_csv('./' + action.description + '.txt', index=False, header=False, encoding='utf-8')
    # Send the file back to the user as a downloadable element.
    elements = [
        cl.File(
            name=action.description + ".txt",
            path="./" + action.description + ".txt",
            display="inline",
        ),
    ]
    await cl.Message(
        content="[Lien] 🔗", elements=elements
    ).send()
    await action.remove()
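
# Note: in Chainlit, an action handler such as on_action only runs if it is registered with
# @cl.action_callback("<action name>") matching the name of the cl.Action attached to a
# message; that registration is not shown in this snippet.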

@cl.set_chat_profiles
async def chat_profile():
    return [
        cl.ChatProfile(name="Survey RH", markdown_description="Questionnaire pour le séminaire RH", icon="/public/logo-ofipe.png"),
        cl.ChatProfile(name="Survey CAA", markdown_description="Questionnaire auprès des professionnels de la branche de l'agencement", icon="/public/logo-ofipe.png"),
        #cl.ChatProfile(name="Articles de recherche sur les lieux d'apprentissage", markdown_description="Q&A sur les lieux d'apprentissage", icon="/public/logo-ofipe.png"),
        #cl.ChatProfile(name="Articles de recherche sur les espaces d'apprentissage", markdown_description="Q&A sur les espaces d'apprentissage", icon="/public/logo-ofipe.png"),
    ]

@cl.on_chat_start
async def on_chat_start():
    await cl.Message("> SURVEYIA").send()
    chat_profile = cl.user_session.get("chat_profile")
    # surveyCaa()/surveyRh() are expected to return the path of the CSV file that create_agent() loads.
    if chat_profile == "Survey CAA":
        fileSurvey = await surveyCaa()
        cl.user_session.set("fileSurvey", fileSurvey)
    elif chat_profile == "Survey RH":
        fileSurvey = await surveyRh()
        cl.user_session.set("fileSurvey", fileSurvey)

@cl.on_message
async def on_message(message: cl.Message):
    await cl.Message("> SURVEYIA").send()
    agent = create_agent(cl.user_session.get("fileSurvey"))
    cb = cl.AsyncLangchainCallbackHandler()
    try:
        #res = await agent.acall("Réponds en langue française à la question suivante : " + message.content, callbacks=[cb])
        res = await agent.ainvoke({"input": "Réponds de la manière la plus complète et la plus intelligible, en langue française, à la question suivante : " + message.content + ". Réponds au format markdown ou au format tableau si le résultat nécessite l'affichage d'un tableau."})
        #res = await agent.ainvoke("Réponds de la manière la plus complète et la plus intelligible, en langue française, à la question suivante : " + message.content + ". Réponds au format markdown ou au format tableau si le résultat nécessite l'affichage d'un tableau.")
        await cl.Message(author="COPILOT", content=GoogleTranslator(source='auto', target='fr').translate(res['output'])).send()
    except ValueError as e:
        # LangChain output-parsing errors embed the raw model answer after the second colon; recover it.
        res = str(e)
        resArray = res.split(":")
        ans = ''
        if res.find('parsing') != -1:
            for i in range(2, len(resArray)):
                ans += resArray[i]
            await cl.Message(author="COPILOT", content=ans.replace("`", "")).send()
        else:
            await cl.Message(author="COPILOT", content="Reformulez votre requête, s'il vous plait 😃").send()
    # Query the agent.
    #response = query_agent(agent=agent, query=message.content)
    # Decode the response.
    #decoded_response = decode_response(response)
    # Write the response to the Streamlit app.
    #result = write_response(decoded_response)
    #await cl.Message(author="COPILOT",content=GoogleTranslator(source='auto', target='fr').translate(result)).send()