from http.server import HTTPServer, BaseHTTPRequestHandler
from urllib.parse import urlparse, parse_qs
import os
from huggingface_hub.file_download import http_get
from llama_cpp import Llama


directory = "/home/oluser/olapp/"
model_url = "https://huggingface.co/IlyaGusev/saiga_mistral_7b_gguf/resolve/main/model-q8_0.gguf"
model_name = "model-q8_0.gguf"
final_model_path = os.path.join(directory, model_name)  # local path for the downloaded GGUF file

print("Downloading all files...")
#rm_files = [os.path.join(directory, f) for f in os.listdir(directory)]
#for f in rm_files:
#   if os.path.isfile(f):
#       os.remove(f)
#   else:
#       shutil.rmtree(f)

if not os.path.exists(final_model_path):
    with open(final_model_path, "wb") as f:
        http_get(model_url, f)
os.chmod(final_model_path, 0o777)
print("Files downloaded!")

model = Llama(
    model_path=final_model_path,
    n_ctx=4096,  # context window size in tokens
)

print("Model loaded!")


class OlHandler(BaseHTTPRequestHandler):

    def do_GET(self):
        # Extract the prompt from the query string, e.g. /?q=Hello
        query_components = parse_qs(urlparse(self.path).query)
        q = query_components.get("q", [""])[0]

        # The call returns an OpenAI-style completion dict; pull out the text
        output = model(
            q,
            max_tokens=32,  # generate up to 32 tokens
            echo=False,     # do not echo the prompt back in the output
        )
        answer = output["choices"][0]["text"]

        self.send_response(200)
        self.send_header("Content-Type", "text/plain; charset=utf-8")
        self.end_headers()
        self.wfile.write(answer.encode("utf-8"))


if __name__ == '__main__':
    olserver = HTTPServer(('0.0.0.0', 7860), OlHandler)
    print('Starting server at http://0.0.0.0:7860')
    olserver.serve_forever()
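
# A minimal client sketch for the endpoint above (assumptions: the server is
# running and reachable on localhost:7860, and the third-party `requests`
# package is installed; it is not used elsewhere in this file):
#
#   import requests
#   resp = requests.get("http://localhost:7860/", params={"q": "Hello"})
#   print(resp.text)  # the generated completion text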