import os
import json
import bcrypt
import pandas as pd
import numpy as np
from typing import List
from pathlib import Path

from langchain_openai import ChatOpenAI, OpenAI
from langchain.schema.runnable.config import RunnableConfig
from langchain.schema import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate

from langchain.agents import AgentExecutor
from langchain.agents.agent_types import AgentType
#from langchain_experimental.agents.agent_toolkits import create_pandas_dataframe_agent, create_csv_agent
from langchain_community.agent_toolkits import create_sql_agent
from langchain_community.utilities import SQLDatabase
from sqlalchemy import create_engine

import chainlit as cl
from chainlit.input_widget import TextInput, Select, Switch, Slider

from deep_translator import GoogleTranslator
from IPython.display import display

from literalai import LiteralClient
literal_client = LiteralClient(api_key=os.getenv("LITERAL_API_KEY"))

@cl.password_auth_callback
def auth_callback(username: str, password: str):
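    # CHAINLIT_AUTH_LOGIN is expected to hold a JSON list of credential records;
    # an illustrative (made-up) example of the expected shape:
    # [{"ident": "alice", "pwd": "secret", "role": "admindatapcc"}, ...]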
    auth = json.loads(os.environ['CHAINLIT_AUTH_LOGIN'])
    # Look up this username's record; return None for unknown users instead of
    # raising StopIteration.
    record = next((d for d in auth if d['ident'] == username), None)
    if record is None:
        return None
    ident = record['ident']
    pwd = record['pwd']
    resultLogAdmin = bcrypt.checkpw(username.encode('utf-8'), bcrypt.hashpw(ident.encode('utf-8'), bcrypt.gensalt()))
    resultPwdAdmin = bcrypt.checkpw(password.encode('utf-8'), bcrypt.hashpw(pwd.encode('utf-8'), bcrypt.gensalt()))
    resultRole = record['role']
    if resultLogAdmin and resultPwdAdmin and resultRole == "admindatapcc":
        return cl.User(
            identifier=ident + " : 🧑‍💼 Admin Datapcc", metadata={"role": "admin", "provider": "credentials"}
        )
    elif resultLogAdmin and resultPwdAdmin and resultRole == "userdatapcc":
        return cl.User(
            identifier=ident + " : 🧑‍🎓 User Datapcc", metadata={"role": "user", "provider": "credentials"}
        )
        
@cl.step(type="run")
def create_agent(filename: str):
    """
    Create an agent that provides access to and use of a large language model (LLM).

    Args:
        filename: the path of the CSV file that contains the data.

    Returns:
        An agent that can access and use the LLM.
    """

    # Create the chat model; the OpenAI key is read from the OPENAI_API_KEY
    # environment variable.
    llm = ChatOpenAI(temperature=0, model="gpt-4o-2024-08-06")
    
    # Load the CSV into a per-session SQLite database on first use, then reuse
    # the cached SQLDatabase on subsequent messages.
    if cl.user_session.get("createdb") is None:
        df = pd.read_csv(filename)
        engine = create_engine("sqlite:///sphinx" + str(cl.user_session.get("id")) + ".db")
        df.to_sql("sphinx" + str(cl.user_session.get("id")), engine, index=False)
        db = SQLDatabase(engine=engine)
        cl.user_session.set("createdb", "OK")
        cl.user_session.set("db", db)
    else:
        db = cl.user_session.get("db")
    # Create a SQL agent.
    # e.g. agent_executor.invoke({"input": "Quel est le nombre de chargé d'affaires en agencement par entreprise?"})
    return create_sql_agent(llm, db=db, agent_type="openai-tools", verbose=False)

def query_agent(agent, query):
    """
    Query an agent and return the response as a string.

    Args:
        agent: The agent to query.
        query: The query to ask the agent.

    Returns:
        The response from the agent as a string.
    """

    prompt = (
        """
            For the following query, if it requires drawing a table, reply as follows:
            {"table": {"columns": ["column1", "column2", ...], "data": [[value1, value2, ...], [value1, value2, ...], ...]}}

            If the query requires creating a bar chart, reply as follows:
            {"bar": {"columns": ["A", "B", "C", ...], "data": [25, 24, 10, ...]}}

            If the query requires creating a line chart, reply as follows:
            {"line": {"columns": ["A", "B", "C", ...], "data": [25, 24, 10, ...]}}

            There can only be two types of chart: "bar" and "line".

            If it is just asking a question that requires neither, reply as follows:
            {"answer": "answer"}
            Example:
            {"answer": "The title with the highest rating is 'Gilead'"}

            If you do not know the answer, reply as follows:
            {"answer": "I do not know."}

            Return all output as a string.

            All strings in the "columns" list and in the data list should be in double quotes.

            For example: {"columns": ["title", "ratings_count"], "data": [["Gilead", 361], ["Spider's Web", 5164]]}

            Let's think step by step.

            Below is the query.
            Query: 
            """
        + query
    )

    # Run the prompt through the agent.
    response = agent.invoke(prompt)
    # Convert the response to a string.
    return str(response)
    
def decode_response(response: str) -> list:
    """Convert the string response from the model into Python objects.

    The raw text is wrapped in square brackets before parsing, so the result
    is a JSON array, i.e. a list of dictionaries.

    Args:
        response (str): response from the model

    Returns:
        list: list of dictionaries with the response data
    """
    return json.loads("[" + response + "]")
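
# Example (illustrative): decode_response('{"answer": "42"}') returns
# [{"answer": "42"}], since the text is wrapped in JSON-array brackets first.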

def write_response(response_dict: dict):
    """
    Extract the displayable answer from a decoded agent response.

    Args:
        response_dict: The decoded response from the agent.

    Returns:
        The "answer" string contained in the response.
    """

    # Only the plain "answer" payload is handled here.
    return response_dict["answer"]
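
# Illustrative sketch (assumption, not part of the original app): the prompt in
# query_agent can also produce "table", "bar" and "line" payloads, which
# write_response above does not handle. One possible way to turn them into a
# plain-text answer with pandas is shown below; render_response is a
# hypothetical helper, not an existing API of this project.
def render_response(response_dict: dict) -> str:
    # Plain answer: return it unchanged.
    if "answer" in response_dict:
        return response_dict["answer"]
    # Tabular payload: rebuild the DataFrame and print it as text.
    if "table" in response_dict:
        table = response_dict["table"]
        df = pd.DataFrame(table["data"], columns=table["columns"])
        return df.to_string(index=False)
    # Chart payloads: fall back to a two-column label/value listing.
    for kind in ("bar", "line"):
        if kind in response_dict:
            chart = response_dict[kind]
            df = pd.DataFrame({"label": chart["columns"], "value": chart["data"]})
            return df.to_string(index=False)
    return "Unrecognised response format."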

@cl.action_callback("Download")
async def on_action(action):
    content = []
    content.append(action.value)
    arrayContent = np.array(content)
    df = pd.DataFrame(arrayContent)
    with open('./' + action.description + '.txt', 'wb') as csv_file:
        df.to_csv(path_or_buf=csv_file, index=False, header=False, encoding='utf-8')
    elements = [
        cl.File(
            name= action.description + ".txt",
            path="./" + action.description + ".txt",
            display="inline",
        ),
    ]
    await cl.Message(
        content="[Lien] 🔗", elements=elements
    ).send()
    await action.remove()
    
@cl.set_chat_profiles
async def chat_profile():
    return [
        cl.ChatProfile(name="Survey RH", markdown_description="Questionnaire pour le séminaire RH", icon="/public/logo-ofipe.png"),
        cl.ChatProfile(name="Survey CAA", markdown_description="Questionnaire auprès des professionnels de la branche de l'agencement", icon="/public/logo-ofipe.png"),
        #cl.ChatProfile(name="Articles de recherche sur les lieux d'apprentissage",markdown_description="Q&A sur les lieux d'apprentissage",icon="/public/logo-ofipe.png",),
        #cl.ChatProfile(name="Articles de recherche sur les espaces d'apprentissage",markdown_description="Q&A sur les espaces d'apprentissage",icon="/public/logo-ofipe.png",),
    ]
    
@cl.on_chat_start
async def on_chat_start():
    await cl.Message("> SURVEYIA").send()
    chat_profile = cl.user_session.get("chat_profile")
    fileSurvey = None
    if chat_profile == "Survey CAA":
        from surveycaa import surveyCaa
        fileSurvey = await surveyCaa()
    elif chat_profile == "Survey RH":
        from surveycaa import surveyRh
        fileSurvey = await surveyRh()
    cl.user_session.set("fileSurvey", fileSurvey)

@cl.on_message
async def on_message(message: cl.Message):
    await cl.Message("> SURVEYIA").send()
    agent = create_agent(cl.user_session.get("fileSurvey"))
    # Callback handler prepared for the commented-out LangChain calls below; the
    # active ainvoke call does not pass it.
    cb = cl.AsyncLangchainCallbackHandler()
    try:
        #res = await agent.acall("Réponds en langue française à la question suivante : " + message.content, callbacks=[cb])
        res = await agent.ainvoke({"input": "Réponds de la manière la plus complète et la plus intelligible, en langue française, à la question suivante : " + message.content + ". Réponds au format markdown ou au format tableau si le résultat nécessite l'affichage d'un tableau."})
        #res = await agent.ainvoke("Réponds de la manière la plus complète et la plus intelligible, en langue française, à la question suivante : " + message.content + ". Réponds au format markdown ou au format tableau si le résultat nécessite l'affichage d'un tableau.")
        await cl.Message(author="COPILOT",content=GoogleTranslator(source='auto', target='fr').translate(res['output'])).send()
    except ValueError as e:
        res = str(e)
        resArray = res.split(":")
        ans = ''
        # For output-parsing errors, recover the model's raw text that follows
        # the second colon of the error message and display it.
        if 'parsing' in res:
            for i in range(2, len(resArray)):
                ans += resArray[i]
            await cl.Message(author="COPILOT", content=ans.replace("`", "")).send()
        else:
            await cl.Message(author="COPILOT",content="Reformulez votre requête, s'il vous plait 😃").send()
    # Query the agent.
    #response = query_agent(agent=agent, query=message.content)
    # Decode the response.
    #decoded_response = decode_response(response)

    # Write the response to the Chainlit app.
    #result = write_response(decoded_response)   
    #await cl.Message(author="COPILOT",content=GoogleTranslator(source='auto', target='fr').translate(result)).send()