Commit fecb58e · Parent: bf348ee
Upload app.py with huggingface_hub
app.py CHANGED
@@ -1,32 +1,20 @@
-## required lib, required "pip install"
-# import transformers
-# import accelerate
import openai
import llama_index
import torch
import cryptography
import cryptography.fernet
-## interface libs, required "pip install"
import gradio
import huggingface_hub
import huggingface_hub.hf_api
-## standard libs, no need to install
import json
-import requests
import time
import os
import random
-import re
import sys
import psutil
-import threading
import socket
-# import PIL
-# import pandas
-import matplotlib
class HFace_Pluto(object):
-  #
  # initialize the object
  def __init__(self, name="Pluto",*args, **kwargs):
    super(HFace_Pluto, self).__init__(*args, **kwargs)
@@ -91,23 +79,6 @@ class HFace_Pluto(object):
    y = x
    return y
  #
-  # fetch huggingface file
-  def fetch_hface_files(self,
-    hf_names,
-    hf_space="duchaba/monty",
-    local_dir="/content/"):
-    f = str(hf_names) + " is not iteratable, type: " + str(type(hf_names))
-    try:
-      for f in hf_names:
-        lo = local_dir + f
-        huggingface_hub.hf_hub_download(repo_id=hf_space, filename=f,
-          use_auth_token=True,repo_type=huggingface_hub.REPO_TYPE_SPACE,
-          force_filename=lo)
-    except:
-      self._pp("*Error", f)
-    return
-  #
-  #
  def push_hface_files(self,
    hf_names,
    hf_space="duchaba/skin_cancer_diagnose",
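For reference, the deleted fetch_hface_files leaned on huggingface_hub's deprecated force_filename and use_auth_token arguments. A minimal sketch of the same download against a current API, assuming huggingface_hub >= 0.14 (the version the app pins elsewhere in this file) and reusing the repo values from the deleted helper; the filename is hypothetical:

import huggingface_hub

# Download one file from a Space repo; local_dir replaces the old force_filename.
local_path = huggingface_hub.hf_hub_download(
    repo_id="duchaba/monty",   # hf_space default from the deleted helper
    filename="app.py",         # hypothetical filename, for illustration only
    repo_type="space",         # Space repos need an explicit repo_type
    local_dir="/content/",     # local_dir default from the deleted helper
)
print(local_path)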
@@ -133,50 +104,12 @@ class HFace_Pluto(object):
      repo_type="space")
    return
  #
-  # Define a function to display available CPU and RAM
-  def fetch_system_info(self):
-    s=''
-    # Get CPU usage as a percentage
-    cpu_usage = psutil.cpu_percent()
-    # Get available memory in bytes
-    mem = psutil.virtual_memory()
-    # Convert bytes to gigabytes
-    mem_total_gb = mem.total / (1024 ** 3)
-    mem_available_gb = mem.available / (1024 ** 3)
-    mem_used_gb = mem.used / (1024 ** 3)
-    # Print the results
-    s += f"CPU usage: {cpu_usage}%\n"
-    s += f"Total memory: {mem_total_gb:.2f} GB\n"
-    s += f"Available memory: {mem_available_gb:.2f} GB\n"
-    # print(f"Used memory: {mem_used_gb:.2f} GB")
-    s += f"Memory usage: {mem_used_gb/mem_total_gb:.2f}%\n"
-    return s
-  #
-  def restart_script_periodically(self):
-    while True:
-      #random_time = random.randint(540, 600)
-      random_time = random.randint(15800, 21600)
-      time.sleep(random_time)
-      os.execl(sys.executable, sys.executable, *sys.argv)
-    return
-  #
  def write_file(self,fname, txt):
    f = open(fname, "w")
    f.writelines("\n".join(txt))
    f.close()
    return
  #
-  def fetch_gpu_info(self):
-    s=''
-    try:
-      s += f'Your GPU is the {torch.cuda.get_device_name(0)}\n'
-      s += f'GPU ready staus {torch.cuda.is_available()}\n'
-      s += f'GPU allocated RAM: {round(torch.cuda.memory_allocated(0)/1024**3,1)} GB\n'
-      s += f'GPU reserved RAM {round(torch.cuda.memory_reserved(0)/1024**3,1)} GB\n'
-    except Exception as e:
-      s += f'**Warning, No GPU: {e}'
-    return s
-  #
  def _fetch_crypt(self,is_generate=False):
    s=self._fkey[::-1]
    if (is_generate):
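Worth noting: the deleted fetch_system_info formatted mem_used_gb/mem_total_gb directly with a % sign, so it printed a fraction (e.g. 0.47%) rather than a percentage. A minimal sketch of the same report using psutil's built-in percentage:

import psutil

mem = psutil.virtual_memory()
print(f"CPU usage: {psutil.cpu_percent()}%")
print(f"Total memory: {mem.total / (1024 ** 3):.2f} GB")
print(f"Available memory: {mem.available / (1024 ** 3):.2f} GB")
print(f"Memory usage: {mem.percent}%")  # psutil already reports used memory as a percentage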
@@ -208,25 +141,6 @@ class HFace_Pluto(object):
    self._ph()
    return
  #
-  def _fetch_version(self):
-    s = ''
-    # print(f"{'torch: 2.0.1':<25} Actual: {torch.__version__}")
-    # print(f"{'transformers: 4.29.2':<25} Actual: {transformers.__version__}")
-    s += f"{'openai: 0.27.7,':<28} Actual: {openai.__version__}\n"
-    s += f"{'huggingface_hub: 0.14.1,':<28} Actual: {huggingface_hub.__version__}\n"
-    s += f"{'gradio: 3.32.0,':<28} Actual: {gradio.__version__}\n"
-    s += f"{'cryptography: 3.34.0,':<28} Actual: {cryptography.__version__}\n"
-    s += f"{'llama_index: 0.6.21.post1,':<28} Actual: {llama_index.__version__}\n"
-    return s
-  #
-  def _fetch_host_ip(self):
-    s=''
-    hostname = socket.gethostname()
-    ip_address = socket.gethostbyname(hostname)
-    s += f"Hostname: {hostname}\n"
-    s += f"IP Address: {ip_address}\n"
-    return s
-  #
  def _setup_openai(self,key=None):
    if (key is None):
      key = self._decrypt_it(self._gpt_key)
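The kept helpers _fetch_crypt and _decrypt_it are not fully shown in this hunk, but given the cryptography.fernet import they presumably wrap Fernet symmetric encryption for the stored OpenAI key. A minimal sketch of that pattern, with every value hypothetical:

from cryptography.fernet import Fernet

key = Fernet.generate_key()    # 32-byte url-safe base64 key
cipher = Fernet(key)
token = cipher.encrypt(b"sk-hypothetical-api-key")  # encrypted token (bytes)
plain = cipher.decrypt(token)  # b"sk-hypothetical-api-key" back again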
@@ -259,28 +173,9 @@ def add_method(cls):
#
monty = HFace_Pluto("Monty")
monty._login_hface()
-# print(monty._fetch_version())
-# monty._ph()
-# print(monty.fetch_system_info())
-# monty._ph()
-# print(monty.fetch_gpu_info())
-# monty._ph()
-# print(monty._fetch_host_ip())
monty._ph()
monty._setup_openai()

-@add_method(HFace_Pluto)
-def gen_llama_index(self, doc_path, vindex='vector_index', vpath='./index_storage'):
-  # load doc
-  doc = llama_index.SimpleDirectoryReader(doc_path).load_data()
-  # need openai key
-  self._llama_index_doc = llama_index.VectorStoreIndex.from_documents(doc)
-  # save index to disk
-  self._llama_index_doc.set_index_id(vindex)
-  self._llama_index_doc.storage_context.persist(vpath)
-  print(f'Index doc are: {self._fetch_index_files(self._llama_index_doc)}')
-  return
-
@add_method(HFace_Pluto)
def load_llama_index(self,vindex='vector_index',vpath='./index_storage'):
  try:
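With gen_llama_index deleted, the app now only loads a pre-built index from disk. For context, the build/persist/load round trip in the llama_index 0.6.x API this file pins looks roughly like this (the docs path is hypothetical, and building requires an OpenAI key in the environment):

import llama_index

# Build and persist (what the deleted gen_llama_index did):
docs = llama_index.SimpleDirectoryReader('./docs').load_data()
index = llama_index.VectorStoreIndex.from_documents(docs)
index.set_index_id('vector_index')
index.storage_context.persist('./index_storage')

# Load it back (what the kept load_llama_index presumably does):
ctx = llama_index.StorageContext.from_defaults(persist_dir='./index_storage')
index = llama_index.load_index_from_storage(ctx, index_id='vector_index')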
@@ -297,49 +192,26 @@ def load_llama_index(self,vindex='vector_index',vpath='./index_storage'):
monty.load_llama_index()

@add_method(HFace_Pluto)
-def ask_me(self, p, ll_sign_in_member='Girish'):
-
-  if (self._llama_query_engine is None):
-    self._llama_query_engine = self._llama_index_doc.as_query_engine()
-
+def ask_me(self, p, ll_sign_in_member='Girish', ll_engine='Humana'):
+  self._llama_query_engine = self._llama_index_doc.as_query_engine()
  ll_engine = self._llama_query_engine

  px = f'My name is {ll_sign_in_member}, and I want answer to the following: {p}.'
  print("##### " + px)
+
  resp = ll_engine.query(px)
-
  return resp
-# @add_method(HFace_Pluto)
-def ask_me(self, p):
-  if (self._llama_query_engine is None):
-    self._llama_query_engine = self._llama_index_doc.as_chat_engine()
-  resp = self._llama_query_engine.chat(p)
-  return resp
-
-p = 'How is my Humana plan chaning?'
-resp = monty.ask_me(p)
-print(resp)
-
-gradio.Interface(fn=monty.ask_me,
-  inputs=in_box,
-  outputs=out_box,
-  examples=exp,
-  title=title,
-  description=desc,
-  article=arti,
-  flagging_options=flag_options).launch(debug=True)

in_box = [gradio.Textbox(lines=1, label="Your Humana request", placeholder="Your Humana request...see example if you need help.")
-,gradio.Radio(["
-,gradio.Radio(["Humana"
+,gradio.Radio(["Girish"], label="Login Member", value='Girish', info="Who had login?")
+,gradio.Radio(["Humana"], label="Login Member", value='Humana', info="Fine-Tune LLM for:")
]
out_box = [gradio.Textbox(label="Humana response:")]
#

-title = "Humana and
+title = "Humana and C&T Fine-tune LLM model"
desc = '*Note: This model is fine-tuned by YML using GPT3.5 as the base LLM.'
-arti =
-arti += '<li><i>**Note: You can add more documentation. The more documentation the model has the smarter it will be.</i></li></ul>'
+arti = '<li><i>**Note: You can add more documentation. The more documentation the model has the smarter it will be.</i></li></ul>'
exp = [
['Tell me the Humana Gold Plus plan.'],
['Please write a summary in bullet point of the Humana Gold Plus SNP-DE H0028-015 (HMO-POS D-SNP) Annual Notice of Changes for 2023.'],
@@ -362,5 +234,4 @@ gradio.Interface(fn=monty.ask_me,
  examples=exp,
  title=title,
  description=desc,
-  article=arti
-  flagging_options=flag_options).launch(debug=True)
+  article=arti).launch(debug=True)
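On the wiring: gradio.Interface passes one argument per input component, so the new three-parameter ask_me(p, ll_sign_in_member, ll_engine) lines up with the Textbox plus two Radios in in_box. A minimal self-contained sketch of that contract, with a placeholder handler standing in for the app's llama_index call:

import gradio

def ask_me(p, member, plan):
    # One parameter per input component; the real app routes p to the query engine.
    return f"{member} asked about {plan}: {p}"

in_box = [
    gradio.Textbox(lines=1, label="Your Humana request"),
    gradio.Radio(["Girish"], label="Login Member", value="Girish"),
    gradio.Radio(["Humana"], label="Fine-Tune LLM for", value="Humana"),
]
out_box = gradio.Textbox(label="Humana response:")

gradio.Interface(fn=ask_me, inputs=in_box, outputs=out_box).launch(debug=True)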