filipealmeida
committed on
Commit
•
2c40787
1
Parent(s):
1d42f08
Command line flag to choose a local model
Browse files
app.py
CHANGED
@@ -4,7 +4,7 @@ import re
|
|
4 |
from llama_cpp import Llama
|
5 |
from huggingface_hub import hf_hub_download
|
6 |
import sys
|
7 |
-
import
|
8 |
|
9 |
# Set up logging
|
10 |
logging.basicConfig(level=logging.INFO)
|
@@ -43,8 +43,17 @@ def generate_text(prompt, example):
|
|
43 |
|
44 |
logging.info(f"Generated text: {full_text}")
|
45 |
|
46 |
-
|
47 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
48 |
|
49 |
# Create a Gradio interface
|
50 |
interface = gr.Interface(
|
|
|
4 |
from llama_cpp import Llama
|
5 |
from huggingface_hub import hf_hub_download
|
6 |
import sys
|
7 |
+
import argparse
|
8 |
|
9 |
# Set up logging
|
10 |
logging.basicConfig(level=logging.INFO)
|
|
|
43 |
|
44 |
logging.info(f"Generated text: {full_text}")
|
45 |
|
46 |
+
|
47 |
+
parser = argparse.ArgumentParser()
|
48 |
+
parser.add_argument("--model", help="Path to the model file")
|
49 |
+
args = parser.parse_args()
|
50 |
+
|
51 |
+
if args.model:
|
52 |
+
model_path = args.model
|
53 |
+
else:
|
54 |
+
model_path = download_model()
|
55 |
+
|
56 |
+
llm = Llama(model_path=model_path)
|
57 |
|
58 |
# Create a Gradio interface
|
59 |
interface = gr.Interface(
|