filipealmeida
committed on
Commit
•
83c9011
1
Parent(s):
49dcfb0
Increase context to 1024 and use larger example prompt.
Browse files
app.py
CHANGED
@@ -33,7 +33,7 @@ def generate_text(prompt, example):
|
|
33 |
|
34 |
logging.info(f"Input : {input}")
|
35 |
|
36 |
-
output_stream = llm(input, max_tokens=
|
37 |
|
38 |
full_text = ""
|
39 |
for output_chunk in output_stream:
|
@@ -44,14 +44,14 @@ def generate_text(prompt, example):
|
|
44 |
logging.info(f"Generated text: {full_text}")
|
45 |
|
46 |
model = download_model()
|
47 |
-
llm = Llama(model_path=model)
|
48 |
|
49 |
# Create a Gradio interface
|
50 |
interface = gr.Interface(
|
51 |
fn=generate_text,
|
52 |
inputs=[
|
53 |
gr.Textbox(lines=4, placeholder="Enter text to anonimize...", label="Text with PII",
|
54 |
-
value="My name is Filipe and my phone number is 555-121-2234. How are you
|
55 |
],
|
56 |
outputs=gr.Textbox(label="PII Sanitized version of the text"),
|
57 |
title="PII Sanitization Model",
|
|
|
33 |
|
34 |
logging.info(f"Input : {input}")
|
35 |
|
36 |
+
output_stream = llm(input, max_tokens=1024, stop=["</s>", "###"], stream=True)
|
37 |
|
38 |
full_text = ""
|
39 |
for output_chunk in output_stream:
|
|
|
44 |
logging.info(f"Generated text: {full_text}")
|
45 |
|
46 |
model = download_model()
|
47 |
+
llm = Llama(model_path=model, n_ctx=1024)
|
48 |
|
49 |
# Create a Gradio interface
|
50 |
interface = gr.Interface(
|
51 |
fn=generate_text,
|
52 |
inputs=[
|
53 |
gr.Textbox(lines=4, placeholder="Enter text to anonimize...", label="Text with PII",
|
54 |
+
value="My name is Filipe and my phone number is 555-121-2234. How are you?\nWant to meet up in Los Angeles at 5pm by the Grove?\nI live in downtown LA.")
|
55 |
],
|
56 |
outputs=gr.Textbox(label="PII Sanitized version of the text"),
|
57 |
title="PII Sanitization Model",
|