# (Removed Hugging Face Spaces page residue: "Spaces: Paused Paused" — UI text
# accidentally captured with the source, not part of the program.)
import json
import os
from http.server import BaseHTTPRequestHandler, HTTPServer
from urllib.parse import parse_qs, urlparse
from urllib.request import urlretrieve  # stdlib replacement for huggingface_hub's http_get

from llama_cpp import Llama
# Location on disk where the model checkpoint is kept inside the container.
directory = "/home/oluser/olapp/"
# Quantized (q8_0) Saiga-Mistral 7B GGUF checkpoint hosted on Hugging Face.
model_url = "https://huggingface.co/IlyaGusev/saiga_mistral_7b_gguf/resolve/main/model-q8_0.gguf"
model_name = "model-q8_0.gguf"
# Full path the download step writes to and the loader reads from.
final_model_path = os.path.join(directory, model_name)
print("Downloading all files...")
# Fetch the checkpoint only if it is not already present, so restarts of the
# Space do not re-download ~7 GB.
if not os.path.exists(final_model_path):
    # BUGFIX: the original called huggingface_hub's http_get(), but that import
    # was commented out, so this branch raised NameError. The stdlib
    # urlretrieve() performs the same plain-HTTP download to a local file.
    urlretrieve(model_url, final_model_path)
    # World-writable so the (possibly non-root) runtime user can overwrite it.
    os.chmod(final_model_path, 0o777)
print("Files downloaded!")
# Load the GGUF model with llama.cpp bindings.
# n_ctx: context window in tokens.
# n_parts=1: legacy llama.cpp option — the model file is not split into parts.
model = Llama(
    model_path=final_model_path,
    n_ctx=4096,
    n_parts=1,
)
print("Model loaded!")
class OlHandler(BaseHTTPRequestHandler):
    """Minimal HTTP handler: GET /?q=<prompt> returns the model's completion."""

    def do_GET(self):
        # parse_qs URL-decodes values and tolerates empty queries or values
        # containing '='. BUGFIX: the original dict(qc.split("=")) crashed on
        # an empty query string and on any value with an embedded '='.
        params = parse_qs(urlparse(self.path).query)
        # Missing "q" now yields an empty prompt instead of a KeyError.
        q = params.get("q", [""])[0]
        # BUGFIX: the original called the undefined name `llm`; the model was
        # bound to the module-level global `model`.
        output = model(
            q,
            max_tokens=32,  # generate up to 32 tokens
            echo=False,     # do not echo the prompt back in the completion
        )
        # BUGFIX: llama-cpp-python returns a completion dict; the generated
        # text lives in choices[0]["text"]. The original did
        # output.encode(...), which raised AttributeError on the dict.
        text = output["choices"][0]["text"]
        self.send_response(200)
        self.send_header("Content-Type", "text/plain; charset=utf-8")
        self.end_headers()
        self.wfile.write(text.encode("utf-8"))
if __name__ == '__main__':
    # Bind on all interfaces; 7860 is the port Hugging Face Spaces exposes.
    olserver = HTTPServer(('0.0.0.0', 7860), OlHandler)
    print('Starting server at http://0.0.0.0:7860')
    # Blocks forever, dispatching each request to OlHandler.
    olserver.serve_forever()