File size: 7,487 Bytes
3d40d3b 1b7e1bb c3f257f eefba2d edc46fa 5431a98 ff2536f c72ad56 6bfcb60 629889e 3d40d3b edc46fa c6720e8 edc46fa 5431a98 da24b26 90964f5 a94da6c 90964f5 33be4fb 43d0d69 b1928ac 2ea893d ff2536f b1928ac 32d38dd b1928ac 8720ce5 b1928ac ac83ae2 b1928ac 3323043 40dcc90 704effa 7d745fb 56d437f 7d745fb 56d437f aba6ca0 7d745fb 56d437f 7d745fb 8663b79 9ad250a 8663b79 3eff0be 90545ed 8663b79 edc46fa 4e761ea 4d71c10 6a98842 4d71c10 b1928ac ff2536f b1928ac ff2536f b1928ac ff2536f 231d9e5 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 |
import os
import json
import bcrypt
import pandas as pd
import numpy as np
from typing import List
from pathlib import Path
from langchain_openai import ChatOpenAI
from langchain.schema.runnable.config import RunnableConfig
from langchain.schema import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
from langchain.agents import AgentExecutor
from langchain.agents.agent_types import AgentType
from langchain_experimental.agents.agent_toolkits import create_pandas_dataframe_agent, create_csv_agent
import chainlit as cl
from chainlit.input_widget import TextInput, Select, Switch, Slider
from deep_translator import GoogleTranslator
from IPython.display import display
from surveycaa import surveyCaa
@cl.password_auth_callback
def auth_callback(username: str, password: str):
    """Authenticate a user against the credential list in CHAINLIT_AUTH_LOGIN.

    The environment variable holds a JSON array of records shaped like
    {"ident": ..., "pwd": ..., "role": ...}.

    Args:
        username: Login entered in the Chainlit form.
        password: Password entered in the Chainlit form.

    Returns:
        A cl.User on success, None on any failure (unknown user, bad
        credentials, or unrecognized role).
    """
    auth = json.loads(os.environ['CHAINLIT_AUTH_LOGIN'])
    # Single lookup with a None default: the original called next() three
    # times without a default, so an unknown username raised StopIteration
    # and crashed the callback instead of rejecting the login.
    record = next((d for d in auth if d['ident'] == username), None)
    if record is None:
        return None
    ident = record['ident']
    pwd = record['pwd']
    role = record['role']
    # NOTE(review): 'pwd' appears to be stored in plaintext — hashing it on
    # the fly and immediately checking is just an equality test. Prefer
    # storing bcrypt hashes and calling checkpw against the stored hash.
    login_ok = bcrypt.checkpw(username.encode('utf-8'),
                              bcrypt.hashpw(ident.encode('utf-8'), bcrypt.gensalt()))
    pwd_ok = bcrypt.checkpw(password.encode('utf-8'),
                            bcrypt.hashpw(pwd.encode('utf-8'), bcrypt.gensalt()))
    if not (login_ok and pwd_ok):
        return None
    if role == "admindatapcc":
        return cl.User(
            identifier=ident + " : 🧑💼 Admin Datapcc",
            metadata={"role": "admin", "provider": "credentials"},
        )
    if role == "userdatapcc":
        return cl.User(
            identifier=ident + " : 🧑🎓 User Datapcc",
            metadata={"role": "user", "provider": "credentials"},
        )
    return None
def create_agent(filename: str):
    """Create a CSV-querying LangChain agent backed by an OpenAI chat model.

    Args:
        filename: Path to the CSV file that contains the data.

    Returns:
        An agent executor that can answer questions about the CSV contents.
    """
    # temperature=0 for deterministic answers. ChatOpenAI reads
    # OPENAI_API_KEY from the environment itself — the original's
    # self-assignment of that variable was a no-op and has been removed.
    llm = ChatOpenAI(temperature=0, model="gpt-4o-2024-05-13")
    # create_csv_agent loads the file itself; the original also read it into
    # an unused pandas DataFrame, which has been removed.
    return create_csv_agent(
        llm,
        filename,
        verbose=False,
        allow_dangerous_code=True,  # the agent executes model-generated Python
        handle_parsing_errors=True,
        agent_type=AgentType.OPENAI_FUNCTIONS,
    )
def query_agent(agent, query):
    """Query an agent with a JSON-formatting prompt and return the response.

    Args:
        agent: The agent to query (anything exposing an .invoke() method).
        query: The question to ask the agent.

    Returns:
        The agent's raw response, coerced to a string.
    """
    # Formatting instructions are prepended so the model answers with a
    # JSON payload that decode_response()/write_response() can consume.
    prompt = (
        """
        For the following query, if it requires drawing a table, reply as follows:
        {"table": {"columns": ["column1", "column2", ...], "data": [[value1, value2, ...], [value1, value2, ...], ...]}}
        If the query requires creating a bar chart, reply as follows:
        {"bar": {"columns": ["A", "B", "C", ...], "data": [25, 24, 10, ...]}}
        If the query requires creating a line chart, reply as follows:
        {"line": {"columns": ["A", "B", "C", ...], "data": [25, 24, 10, ...]}}
        There can only be two types of chart, "bar" and "line".
        If it is just asking a question that requires neither, reply as follows:
        {"answer": "answer"}
        Example:
        {"answer": "The title with the highest rating is 'Gilead'"}
        If you do not know the answer, reply as follows:
        {"answer": "I do not know."}
        Return all output as a string.
        All strings in "columns" list and data list, should be in double quotes,
        For example: {"columns": ["title", "ratings_count"], "data": [["Gilead", 361], ["Spider's Web", 5164]]}
        Lets think step by step.
        Below is the query.
        Query:
        """
        + query
    )
    # Run the prompt through the agent.
    response = agent.invoke(prompt)
    # str() is the idiomatic spelling of the original response.__str__().
    return str(response)
def decode_response(response: str) -> dict:
    """Convert the model's JSON string response into a dictionary.

    Args:
        response (str): JSON object emitted by the model,
            e.g. '{"answer": "..."}'.

    Returns:
        dict: dictionary with response data.
    """
    # The original parsed '[' + response + ']', which yields a *list* — that
    # contradicts the annotation and breaks write_response(), which indexes
    # the result with a string key. Parse the object directly instead.
    return json.loads(response)
def write_response(response_dict: dict):
    """Extract the plain-text answer from a decoded agent response.

    Args:
        response_dict: Decoded response produced by decode_response().

    Returns:
        The value stored under the "answer" key.
    """
    answer = response_dict["answer"]
    return answer
@cl.action_callback("Download")
async def on_action(action):
content = []
content.append(action.value)
arrayContent = np.array(content)
df = pd.DataFrame(arrayContent)
with open('./' + action.description + '.txt', 'wb') as csv_file:
df.to_csv(path_or_buf=csv_file, index=False,header=False, encoding='utf-8')
elements = [
cl.File(
name= action.description + ".txt",
path="./" + action.description + ".txt",
display="inline",
),
]
await cl.Message(
content="[Lien] 🔗", elements=elements
).send()
await action.remove()
@cl.set_chat_profiles
async def chat_profile():
    """Declare the chat profiles offered on the Chainlit start screen."""
    icon = "/public/logo-ofipe.png"
    # NOTE(review): three profiles share the name "Articles de recherche";
    # confirm this is intended, since profiles are usually told apart by name.
    return [
        cl.ChatProfile(
            name="Traitement des données d'enquête : «Expé CFA»",
            markdown_description="Questionnaire auprès des professionnels de la branche de l'agencement",
            icon=icon,
        ),
        cl.ChatProfile(
            name="Articles de recherche",
            markdown_description="Q&A sur la Pédagogie Durable",
            icon=icon,
        ),
        cl.ChatProfile(
            name="Articles de recherche",
            markdown_description="Q&A sur les lieux d'apprentissage",
            icon=icon,
        ),
        cl.ChatProfile(
            name="Articles de recherche",
            markdown_description="Q&A sur les espaces d'apprentissage",
            icon=icon,
        ),
    ]
@cl.on_chat_start
async def on_chat_start():
    """Greet the user and launch the survey flow when a session opens."""
    await cl.Message("> SURVEYIA").send()
    await surveyCaa()
@cl.on_message
async def on_message(message: cl.Message):
    """Answer each user message by querying the CSV agent in French.

    On an output-parsing ValueError, the model's raw answer embedded in the
    error text is salvaged and sent instead; other ValueErrors prompt the
    user to rephrase.
    """
    await cl.Message("> SURVEYIA").send()
    agent = create_agent("./public/surveyia.csv")
    handler = cl.AsyncLangchainCallbackHandler()
    try:
        res = await agent.acall(
            "Réponds en langue française à la question suivante : " + message.content,
            callbacks=[handler],
        )
        translated = GoogleTranslator(source='auto', target='fr').translate(res['output'])
        await cl.Message(author="COPILOT", content=translated).send()
    except ValueError as err:
        text = str(err)
        if text.find('parsing') != -1:
            # The usable answer sits after the second ":" of the parsing-error
            # message. Reassemble the remaining pieces without separators
            # (colons are dropped, matching the original loop) and strip
            # backticks before sending.
            pieces = text.split(":")
            answer = "".join(pieces[2:])
            await cl.Message(author="COPILOT", content=answer.replace("`", "")).send()
        else:
            await cl.Message(author="COPILOT", content="Reformulez votre requête, s'il vous plait 😃").send()