Spaces: Runtime error
Commit c4c07eb · Parent(s): a448913
Update app.py
app.py CHANGED
@@ -1,19 +1,31 @@
-
 import os
-import
-
-os.
-
-
-model_path = "ggml-model-q4_0.bin"
-llm = LlamaCpp(model_path=model_path)
-
-
-    response = llm(input_text)
-    return str(response)
-
-
-
-
-
-
+from dotenv import load_dotenv
+from huggingface_hub import login
+from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
+
+load_dotenv()
+
+API_TOKEN = os.environ.get("HUGGINGFACE_TOKEN")
+
+login(API_TOKEN)
+
+model_id = "meta-llama/Llama-2-7b-chat-hf"
+
+model = AutoModelForCausalLM.from_pretrained(model_id, load_in_4bit=True, device_map="auto")
+tokenizer = AutoTokenizer.from_pretrained(model_id)
+
+generate_text_pipeline = pipeline(
+    model=model, tokenizer=tokenizer,
+    return_full_text=True,
+    task='text-generation',
+
+    temperature=0.1,
+    max_new_tokens=512,
+    repetition_penalty=1.1 # without this output begins repeating
+)
+def get_results(text):
+    res = generate_text_pipeline(text)
+    return res[0]["generated_text"]
+
+iface = gr.Interface(fn=get_results, inputs="text", outputs="text")
+iface.launch()
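For reference, a self-contained sketch of the updated app.py is shown below. It assumes one line not visible among the added lines in this diff: `import gradio as gr`, which the new code needs for `gr.Interface` (and which, if truly absent, would be consistent with the Space's runtime error). It also assumes a HUGGINGFACE_TOKEN secret is configured for the Space and that bitsandbytes/accelerate are available for 4-bit loading. This is a sketch under those assumptions, not the exact file in the repository.

# Sketch of the updated app.py; `import gradio as gr` is an assumption,
# since the diff's added lines use gr.Interface without a visible gradio import.
import os

import gradio as gr
from dotenv import load_dotenv
from huggingface_hub import login
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline

load_dotenv()

# Assumes a HUGGINGFACE_TOKEN env var / Space secret with access to the gated Llama 2 weights.
API_TOKEN = os.environ.get("HUGGINGFACE_TOKEN")
login(API_TOKEN)

model_id = "meta-llama/Llama-2-7b-chat-hf"

# 4-bit loading requires bitsandbytes and a GPU; device_map="auto" requires accelerate.
model = AutoModelForCausalLM.from_pretrained(model_id, load_in_4bit=True, device_map="auto")
tokenizer = AutoTokenizer.from_pretrained(model_id)

generate_text_pipeline = pipeline(
    model=model, tokenizer=tokenizer,
    return_full_text=True,
    task='text-generation',
    temperature=0.1,
    max_new_tokens=512,
    repetition_penalty=1.1,  # without this output begins repeating
)

def get_results(text):
    res = generate_text_pipeline(text)
    return res[0]["generated_text"]

iface = gr.Interface(fn=get_results, inputs="text", outputs="text")
iface.launch()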