# -*- coding: utf-8 -*-
"""custom_chatbot.ipynb

Automatically generated by Colaboratory.

Original file is located at
    https://colab.research.google.com/drive/1xT4n5rN6yNyzf-CO8Pifz0yWCEI4VmjV

# Install the dependencies
Run the code below to install the dependencies we need for our functions.
"""

# Commented out IPython magic to ensure Python compatibility.
# %pip install llama-index
# %pip install langchain
# %pip install gradio
# %pip install openai
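# Note: this notebook targets the pre-0.6 llama-index API (GPTSimpleVectorIndex,
# save_to_disk/load_from_disk), which newer releases removed. If a fresh install
# fails on the imports below, pinning older versions may help; these exact pins
# are an assumption, not tested versions.
# %pip install llama-index==0.5.27 langchain==0.0.154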

"""### **How to Train with your data. **
You can use your github repository link. Make sure repository name should be same as given repo.
"""

!git clone https://github.com/talib-raath/context_data.git
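# Optional sanity check: confirm the clone succeeded and see which files will
# be indexed. Assumes the repository landed in ./context_data.
import os
print(os.listdir("context_data"))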

"""# Define the functions
The following code defines the functions we need to construct the index and query it
"""

from llama_index import SimpleDirectoryReader, GPTSimpleVectorIndex, LLMPredictor, PromptHelper, ServiceContext
from langchain import OpenAI
import sys
import os
from IPython.display import Markdown, display

def construct_index(directory_path):
    # set maximum input size
    max_input_size = 4096
    # set number of output tokens
    num_outputs = 2000
    # set maximum chunk overlap
    max_chunk_overlap = 20
    # set chunk size limit
    chunk_size_limit = 600

    # define LLM and wire it in via a ServiceContext (the llama-index 0.5.x
    # pattern that matches the save_to_disk/load_from_disk API used below;
    # as originally written, llm_predictor and prompt_helper were defined but never used)
    llm_predictor = LLMPredictor(llm=OpenAI(temperature=0.5, model_name="gpt-3.5-turbo", max_tokens=num_outputs))
    prompt_helper = PromptHelper(max_input_size, num_outputs, max_chunk_overlap, chunk_size_limit=chunk_size_limit)
    service_context = ServiceContext.from_defaults(llm_predictor=llm_predictor, prompt_helper=prompt_helper)

    documents = SimpleDirectoryReader(directory_path).load_data()

    index = GPTSimpleVectorIndex.from_documents(documents, service_context=service_context)

    index.save_to_disk('index.json')

    return index

def ask_ai():
    # interactive loop: keeps asking for questions until the cell is interrupted
    index = GPTSimpleVectorIndex.load_from_disk('index.json')
    while True:
        query = input("What do you want to ask? ")
        response = index.query(query, response_mode="compact")
        display(Markdown(f"Response: <b>{response.response}</b>"))
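# Example usage (starts the interactive loop; interrupt the cell to stop):
# ask_ai()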

"""# Set OpenAI API Key
You can use this key also but it may expire if it does not work you can get his own api key 

**Use this Key** 


  "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"

You need an OPENAI API key to be able to run this code.

If you don't have one yet, get it by [signing up](https://platform.openai.com/overview). Then click your account icon on the top right of the screen and select "View API Keys". Create an API key.

Then run the code below and paste your API key into the text input.
"""

os.environ["OPENAI_API_KEY"] = input("Paste your OpenAI key here and hit enter:")

"""#Construct an index
Now we are ready to construct the index. This will take every file in the folder 'data', split it into chunks, and embed it with OpenAI's embeddings API.

**Notice:** running this code will cost you credits on your OpenAPI account ($0.02 for every 1,000 tokens). If you've just set up your account, the free credits that you have should be more than enough for this experiment.
"""

construct_index("context_data")

import tkinter as tk
from llama_index import GPTSimpleVectorIndex, LLMPredictor, PromptHelper
from langchain import OpenAI
from IPython.display import Markdown, display

# Define the ask_ai() function (a single-question variant for the GUI;
# this replaces the interactive loop version defined earlier)
def ask_ai(question):
    index = GPTSimpleVectorIndex.load_from_disk('index.json')
    response = index.query(question, response_mode="compact")
    return response.response

# Define the GUI
class ChatBotGUI:
    def __init__(self, master):
        self.master = master
        master.title("Chat Bot")

        # Create a label and an entry for the question
        self.label = tk.Label(master, text="Ask me anything:")
        self.label.pack()

        self.entry = tk.Entry(master)
        self.entry.pack()

        # Create a button to submit the question
        self.button = tk.Button(master, text="Submit", command=self.submit_question)
        self.button.pack()

        # Create a text box to display the response
        self.textbox = tk.Text(master)
        self.textbox.pack()

    def submit_question(self):
        question = self.entry.get()
        response = ask_ai(question)
        self.textbox.insert(tk.END, "You: " + question + "\n")
        self.textbox.insert(tk.END, "Bot: " + response + "\n\n")
        self.entry.delete(0, tk.END)

# Create an instance of the GUI and start the main loop.
# (Left commented out: a tkinter window cannot open inside Colab, so the
# Gradio interface below is used instead.)

'''
root = tk.Tk()
chatbot_gui = ChatBotGUI(root)
root.mainloop()
'''
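"""# Launch a Gradio web UI
Instead of the tkinter window, expose the same ask_ai() function through a Gradio interface. With share=True, launch() also prints a temporary public URL you can open from any browser.
"""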

import gradio as gr

iface = gr.Interface(fn=ask_ai, inputs="text", outputs="text", title="Chatbot")

iface.launch(share=True)