fastx committed on
Commit bd6aadd · 1 Parent(s): 557825a

Upload boot.py

Files changed (1)
  1. boot.py +140 -0
boot.py ADDED
@@ -0,0 +1,140 @@
+ # -*- coding: utf-8 -*-
+ """custom_chatbot.ipynb
+
+ Automatically generated by Colaboratory.
+
+ Original file is located at
+ https://colab.research.google.com/drive/1xT4n5rN6yNyzf-CO8Pifz0yWCEI4VmjV
+
+ # Install the dependencies
+ Run the code below to install the dependencies we need for our functions.
+ """
+
+ # Commented out IPython magic to ensure Python compatibility.
+ # %pip install llama-index
+ # %pip install langchain
+ # %pip install gradio
+ # %pip install openai
+
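+ # Note: this script uses the pre-0.6 llama-index API (GPTSimpleVectorIndex,
+ # save_to_disk/load_from_disk), so unpinned installs of current releases will
+ # not match it. A minimal sketch of version pins compatible with that API; the
+ # exact version numbers are an assumption, adjust as needed:
+ # %pip install llama-index==0.5.27 langchain==0.0.154 openai==0.27.8 gradio
+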
+ """### **How to train with your data**
+ You can use your own GitHub repository link. Make sure the cloned folder name
+ matches the directory passed to construct_index below ("context_data").
+ """
+
+ !git clone https://github.com/talib-raath/context_data.git
+
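+ # To index your own repository instead, clone it into the folder name the
+ # indexing step expects (the target folder "context_data" is an assumption
+ # based on the construct_index call below; the repo URL is a placeholder):
+ # !git clone https://github.com/<your-user>/<your-repo>.git context_data
+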
+ """# Define the functions
+ The following code defines the functions we need to construct the index and query it.
+ """
+
+ from llama_index import SimpleDirectoryReader, GPTListIndex, readers, GPTSimpleVectorIndex, LLMPredictor, PromptHelper
+ from langchain import OpenAI
+ import sys
+ import os
+ from IPython.display import Markdown, display
+
+ def construct_index(directory_path):
+     # set maximum input size
+     max_input_size = 4096
+     # set number of output tokens
+     num_outputs = 2000
+     # set maximum chunk overlap
+     max_chunk_overlap = 20
+     # set chunk size limit
+     chunk_size_limit = 600
+
+     # define LLM
+     llm_predictor = LLMPredictor(llm=OpenAI(temperature=0.5, model_name="gpt-3.5-turbo", max_tokens=num_outputs))
+     prompt_helper = PromptHelper(max_input_size, num_outputs, max_chunk_overlap, chunk_size_limit=chunk_size_limit)
+
+     # load every file in the target folder into Document objects
+     documents = SimpleDirectoryReader(directory_path).load_data()
+
+     # NOTE: as written, llm_predictor and prompt_helper are never passed to the
+     # index, so the library defaults are used; see the sketch below for one way
+     # to wire them in.
+     index = GPTSimpleVectorIndex.from_documents(documents)
+
+     index.save_to_disk('index.json')
+
+     return index
+
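+ # A minimal sketch of wiring the predictor and prompt helper into the index,
+ # assuming a llama-index 0.5.x install where ServiceContext is available:
+ #
+ # from llama_index import ServiceContext
+ # service_context = ServiceContext.from_defaults(
+ #     llm_predictor=llm_predictor, prompt_helper=prompt_helper)
+ # index = GPTSimpleVectorIndex.from_documents(
+ #     documents, service_context=service_context)
+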
+ def ask_ai():
+     # interactive loop over the saved index (redefined further below with a
+     # question argument so it can be called from a UI)
+     index = GPTSimpleVectorIndex.load_from_disk('index.json')
+     while True:
+         query = input("What do you want to ask? ")
+         response = index.query(query, response_mode="compact")
+         display(Markdown(f"Response: <b>{response.response}</b>"))
+
+ """# Set OpenAI API Key
+ You can try the key below, but it may have expired; if it does not work, use your own API key.
+
+ **Use this Key**
+
+ "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"
+
+ You need an OpenAI API key to be able to run this code.
+
+ If you don't have one yet, get it by [signing up](https://platform.openai.com/overview). Then click your account icon on the top right of the screen and select "View API Keys". Create an API key.
+
+ Then run the code below and paste your API key into the text input.
+ """
+
+ os.environ["OPENAI_API_KEY"] = input("Paste your OpenAI key here and hit enter: ")
+
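+ # A minimal alternative that avoids echoing the key to the screen, using the
+ # standard library's getpass module instead of input():
+ # from getpass import getpass
+ # os.environ["OPENAI_API_KEY"] = getpass("Paste your OpenAI key: ")
+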
+ """# Construct an index
+ Now we are ready to construct the index. This will take every file in the folder 'context_data', split it into chunks, and embed it with OpenAI's embeddings API.
+
+ **Notice:** running this code will cost you credits on your OpenAI account ($0.02 for every 1,000 tokens). If you've just set up your account, the free credits that you have should be more than enough for this experiment.
+ """
+
+ construct_index("context_data")
+
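+ # Rough cost arithmetic at the quoted rate, as an illustration only: 100
+ # chunks of 600 tokens each is about 60,000 tokens, which at $0.02 per 1,000
+ # tokens comes to roughly 60 * $0.02 = $1.20.
+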
+ import tkinter as tk
+ from llama_index import GPTSimpleVectorIndex, LLMPredictor, PromptHelper
+ from langchain import OpenAI
+ from IPython.display import Markdown, display
+
+ # Define the ask_ai() function (this version takes the question as an
+ # argument, replacing the interactive-loop version above)
+ def ask_ai(question):
+     index = GPTSimpleVectorIndex.load_from_disk('index.json')
+     response = index.query(question, response_mode="compact")
+     return response.response
+
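+ # Quick sanity check before wiring up a UI (the sample question is just an
+ # illustration; assumes index.json was written by construct_index above):
+ # print(ask_ai("What is this repository about?"))
+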
+ # Define the GUI
+ class ChatBotGUI:
+     def __init__(self, master):
+         self.master = master
+         master.title("Chat Bot")
+
+         # Create a label and an entry for the question
+         self.label = tk.Label(master, text="Ask me anything:")
+         self.label.pack()
+
+         self.entry = tk.Entry(master)
+         self.entry.pack()
+
+         # Create a button to submit the question
+         self.button = tk.Button(master, text="Submit", command=self.submit_question)
+         self.button.pack()
+
+         # Create a text box to display the response
+         self.textbox = tk.Text(master)
+         self.textbox.pack()
+
+     def submit_question(self):
+         question = self.entry.get()
+         response = ask_ai(question)
+         self.textbox.insert(tk.END, "You: " + question + "\n")
+         self.textbox.insert(tk.END, "Bot: " + response + "\n\n")
+         self.entry.delete(0, tk.END)
+
+ # Create an instance of the GUI and start the main loop (kept commented out so
+ # the Gradio interface below runs instead; tkinter needs a local display)
+ '''
+ root = tk.Tk()
+ chatbot_gui = ChatBotGUI(root)
+ root.mainloop()
+ '''
+
+ import gradio as gr
+
+ iface = gr.Interface(fn=ask_ai, inputs="text", outputs="text", title="Chatbot")
+
+ # share=True serves the app through a temporary public gradio.live link
+ iface.launch(share=True)