import logging
import sys

import gradio as gr
from huggingface_hub import hf_hub_download
from llama_cpp import Llama

# Set up logging
logging.basicConfig(level=logging.INFO)
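# NOTE: at INFO level the logging.debug() call in generate_text() below is
# suppressed; use level=logging.DEBUG to see it.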


model_repo = "filipealmeida/open-llama-3b-v2-pii-transform"
model_filename = "ggml-model-f16.gguf"

def download_model():
    print("Downloading model...")
    sys.stdout.flush()
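    # hf_hub_download() caches the file in the local Hugging Face cache, so
    # repeat launches reuse the copy on disk instead of re-downloading it.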
    file = hf_hub_download(repo_id=model_repo, filename=model_filename)
    print(f"Downloaded {file}")
    return file

def generate_text(prompt):
    logging.debug(f"Received prompt: {prompt}")
    # Wrap the user text in the ### Instruction / ### Response template the
    # model expects.
    full_prompt = f"""
### Instruction:
{prompt}
### Response:
"""

    logging.info(f"Input: {full_prompt}")

    # stop=["\n"] ends generation at the first newline of the response.
    output = llm(full_prompt, max_tokens=200, stop=["\n"])
    print(output)
    generated_text = output['choices'][0]['text']
    logging.info(f"Generated text: {generated_text}")

    # The stop sequence should already cut the output at the first newline;
    # splitting here is a defensive guard.
    parsed_text = generated_text.split("\n")[0]

    logging.info(f"Parsed text: {parsed_text}")
    return parsed_text
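# Illustrative call (actual output varies; the model is expected to rewrite
# the sentence with the PII replaced by synthetic values):
#   generate_text("My name is Filipe and my phone number is 555-121-2234.")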

# Download the GGUF weights and load them into llama.cpp once at start-up.
model_path = download_model()
llm = Llama(model_path=model_path)
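# Llama() also accepts tuning parameters such as n_ctx (context window size)
# and n_threads; the library defaults are assumed to be adequate for this
# short-prompt demo.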

# Create a Gradio interface
interface = gr.Interface(
  fn=generate_text,
  inputs=[
      gr.Textbox(lines=1, placeholder="Enter text to anonymize...", label="Prompt",
                 value="My name is Filipe and my phone number is 555-121-2234. How are you?")
  ],
  outputs=gr.Textbox(label="Generated text")
)

# Launch the interface
interface.launch()