Update app.py
app.py CHANGED
@@ -1,4 +1,3 @@
-
 from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline, logging
 
 # Ignore warnings
@@ -9,9 +8,10 @@ model_name = "King-Harry/NinjaMasker-PII-Redaction"
 tokenizer = AutoTokenizer.from_pretrained(model_name)
 model = AutoModelForCausalLM.from_pretrained(model_name)
 
+# Generate text
 pipe = pipeline(task="text-generation", model=model, tokenizer=tokenizer, max_length=100)
 prompt = "My name is Harry and I live in Winnipeg. My phone number is ummm 204 no 203, ahh 4344, no 4355"
 result = pipe(f"<s>[INST] {prompt} [/INST]")
 
 # Print the generated text
-print(result[0]['generated_text'])
+print(result[0]['generated_text'])
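For context, the full app.py after this commit would read roughly as follows. This is a sketch assembled from the diff hunks above; the diff elides whatever follows the "# Ignore warnings" comment, so the logging.set_verbosity_error() call is an assumption, not a line confirmed by the commit.

# Sketch of app.py after this commit, reconstructed from the diff.
from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline, logging

# Ignore warnings (assumed call; the exact line is elided from the diff)
logging.set_verbosity_error()

# Load the PII-redaction model and its tokenizer
model_name = "King-Harry/NinjaMasker-PII-Redaction"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)

# Generate text: wrap the prompt in the [INST] tags the model was trained on
pipe = pipeline(task="text-generation", model=model, tokenizer=tokenizer, max_length=100)
prompt = "My name is Harry and I live in Winnipeg. My phone number is ummm 204 no 203, ahh 4344, no 4355"
result = pipe(f"<s>[INST] {prompt} [/INST]")

# Print the generated text
print(result[0]['generated_text'])

Run as a plain script, this should print the prompt followed by the model's continuation, which for this model is the input text with PII (name, city, phone number) replaced by redaction tags.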