Spaces:
Runtime error
Runtime error
Commit
·
0d5d23d
1
Parent(s):
52102b1
Update app.py
Browse files
app.py
CHANGED
@@ -7,10 +7,19 @@
|
|
7 |
#----------------------------------------------------------------------------------------------------------------------------
|
8 |
from transformers import AutoModelForCausalLM, AutoTokenizer
|
9 |
|
10 |
-
|
11 |
-
|
12 |
-
tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
|
13 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
14 |
# print(generated_text)
|
15 |
|
16 |
# ---------------------------------------------------------------------------------------------------------------------------------------------------------------------
|
|
|
#----------------------------------------------------------------------------------------------------------------------------
from transformers import AutoModelForCausalLM, AutoTokenizer

# Path to the model directory (assuming it's in the same directory as your script).
model_directory = "./"

# Load the model and tokenizer.
# FIX: pass trust_remote_code=True to the *model* load as well as the tokenizer —
# a checkpoint whose tokenizer needs custom repo code typically ships custom
# model code too, and loading it without the flag fails for exactly those repos.
# SECURITY NOTE(review): trust_remote_code executes arbitrary Python shipped
# with the checkpoint; only enable it for a model source you trust.
model = AutoModelForCausalLM.from_pretrained(model_directory, trust_remote_code=True)
tokenizer = AutoTokenizer.from_pretrained(model_directory, trust_remote_code=True)

# Now you can generate text as before
# prompt = "What is a large language model?"
# input_ids = tokenizer.encode(prompt, return_tensors="pt")

# output = model.generate(input_ids, max_length=200, num_return_sequences=1)
# generated_text = tokenizer.decode(output[0], skip_special_tokens=True)
# print(generated_text)

# ---------------------------------------------------------------------------------------------------------------------------------------------------------------------
|