import os

import streamlit as st
from langgraph.graph import END, StateGraph

# Helpers assumed to be defined in a project-local module (not shown here):
# create_retriever_from_chroma, GraphState, ask_question, retrieve,
# grade_documents, generate, web_search, transform_query,
# grade_question_toxicity, decide_to_generate,
# grade_generation_v_documents_and_question, handle_userinput



def main():
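    """Streamlit entry point: configure the page, build the retriever and the
    LangGraph workflow, then hand user questions to the chat handler."""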
    st.set_page_config(page_title="Info Assistant",
                       page_icon=":books:")

    st.header("Info Assistant :books:")

    st.markdown(
        "###### Get support from the Info Assistant, which has many Data Science "
        "related articles in its knowledge base; if it can't answer from that "
        "knowledge base, the information will be looked up on the internet :books:"
    )


    if "messages" not in st.session_state:
        st.session_state["messages"] = [
            {"role": "assistant", "content": "Hi, I'm a chatbot based on Republic of Lithuania law documents. How can I help you?"}
        ]


    search_type = st.selectbox(
        "Choose search type. Options: max marginal relevance search (mmr) or similarity search (similarity). Default: similarity.",
        options=["mmr", "similarity"],
        index=1,
    )
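    # "mmr" re-ranks results to balance relevance with diversity, while
    # "similarity" returns the nearest neighbours by embedding distance.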

    k = st.select_slider(
        "Select the number of documents to retrieve. Default: 4.",
        options=list(range(2, 16)),
        value=4,
    )
    retriever = create_retriever_from_chroma(
        vectorstore_path="docs/chroma/",
        search_type=search_type,
        k=k,
        chunk_size=350,
        chunk_overlap=30,
    )



    # Build the corrective-RAG workflow as a LangGraph state graph
    workflow = StateGraph(GraphState)

    # Define the nodes
    workflow.add_node("ask_question", ask_question)        # take in the user question
    workflow.add_node("retrieve", retrieve)                # retrieve documents
    workflow.add_node("grade_documents", grade_documents)  # grade document relevance
    workflow.add_node("generate", generate)                # generate an answer
    workflow.add_node("web_search", web_search)            # web search fallback
    workflow.add_node("transform_query", transform_query)  # rewrite the query


    # Build graph
    workflow.set_entry_point("ask_question")
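    # Routing: questions that pass the toxicity gate go to retrieval; if the
    # graded documents are insufficient, fall back to web search; generations
    # judged "not useful" trigger a query rewrite and another retrieval pass.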
    workflow.add_conditional_edges(
        "ask_question",
        grade_question_toxicity,
        {
            "good": "retrieve",
            "bad": END,
        },
    )

    workflow.add_edge("retrieve", "grade_documents")
    workflow.add_conditional_edges(
        "grade_documents",
        decide_to_generate,
        {
            "search": "web_search",
            "generate": "generate",
        },
    )
    workflow.add_edge("web_search", "generate")
    workflow.add_conditional_edges(
        "generate",
        grade_generation_v_documents_and_question,
        {
            "not supported": "generate",
            "useful": END,
            "not useful": "transform_query",
        },
    )

    workflow.add_edge("transform_query", "retrieve")

    custom_graph = workflow.compile()

    if user_question := st.text_input("Ask a question about your documents:"):
        # Pass the compiled graph to the chat handler (assumed to be what the
        # handler expects as its third argument).
        handle_userinput(user_question, retriever, custom_graph)



if __name__ == "__main__":
    main()