Update olapp.py
olapp.py CHANGED
@@ -1,17 +1,17 @@
 from http.server import HTTPServer, BaseHTTPRequestHandler
 from urllib.parse import urlparse
-import os
+#import os
 import json
 #from huggingface_hub.file_download import http_get
 from llama_cpp import Llama


-directory = "/home/oluser/olapp/"
-model_url = "https://huggingface.co/IlyaGusev/saiga_mistral_7b_gguf/resolve/main/model-q8_0.gguf"
-model_name = "model-q8_0.gguf"
-final_model_path = os.path.join(directory, model_name)
+#directory = "/home/oluser/olapp/"
+#model_url = "https://huggingface.co/IlyaGusev/saiga_mistral_7b_gguf/resolve/main/model-q8_0.gguf"
+#model_name = "model-q8_0.gguf"
+#final_model_path = os.path.join(directory, model_name)

-print("Downloading all files...")
+#print("Downloading all files...")
 #rm_files = [os.path.join(directory, f) for f in os.listdir(directory)]
 #for f in rm_files:
 #    if os.path.isfile(f):
@@ -19,14 +19,17 @@ print("Downloading all files...")
 #    else:
 #        shutil.rmtree(f)

-if not os.path.exists(final_model_path):
-    with open(final_model_path, "wb") as f:
-        http_get(model_url, f)
-    os.chmod(final_model_path, 0o777)
-print("Files downloaded!")
+#if not os.path.exists(final_model_path):
+#    with open(final_model_path, "wb") as f:
+#        http_get(model_url, f)
+#os.chmod(final_model_path, 0o777)
+#print("Files downloaded!")
+
+print("Loading model...")

 model = Llama(
-    model_path=final_model_path,
+    model_path="/home/oluser/olapp/model-q8_0.gguf",
+    #model_path=final_model_path,
     n_ctx=4096,
     n_parts=1,
 )
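Note on the removed block: the old code called http_get even though its import (from huggingface_hub.file_download) was already commented out on line 5, so the startup download could not have run as written; this commit drops it and hard-codes model_path to a file assumed to already exist at /home/oluser/olapp/model-q8_0.gguf. If a download-at-startup step is ever reinstated, a minimal sketch using huggingface_hub.hf_hub_download (an assumption, not part of this commit; repo id and filename taken from the URL removed above) could look like:

# Sketch only, not part of this commit: fetch the GGUF file at startup
# instead of relying on it being pre-placed in /home/oluser/olapp/.
from huggingface_hub import hf_hub_download
from llama_cpp import Llama

model_path = hf_hub_download(
    repo_id="IlyaGusev/saiga_mistral_7b_gguf",
    filename="model-q8_0.gguf",
    local_dir="/home/oluser/olapp",  # same directory the commit hard-codes
)

print("Loading model...")
model = Llama(
    model_path=model_path,
    n_ctx=4096,
    n_parts=1,
)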