import gradio as gr
import torch
import peft
from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline

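# Note: Llama-2-7B in full fp32 precision needs roughly 28 GB of memory;
# passing torch_dtype=torch.float16 to from_pretrained halves that on a GPU.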
# Load the base model and tokenizer (the fine-tuned LoRA adapter is applied below)
tokenizer = AutoTokenizer.from_pretrained("NousResearch/Llama-2-7b-hf")
model = AutoModelForCausalLM.from_pretrained("NousResearch/Llama-2-7b-hf")
tokenizer.pad_token_id = 0  # Llama 2 ships without a pad token; use id 0 for padding

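# LoRA configuration; these hyperparameters must match the ones used during
# fine-tuning, or the saved adapter weights will not map onto the injected layers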
config = peft.LoraConfig(
    r=8, lora_alpha=16, target_modules=["q_proj", "v_proj"],
    lora_dropout=0.005, bias="none", task_type="CAUSAL_LM",
)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = peft.get_peft_model(model, config).to(device)

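# Load the fine-tuned LoRA adapter weights into the wrapped model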
peft.set_peft_model_state_dict(model, torch.load(".weights/adapter_model.bin", map_location=device))

# Build the text-generation pipeline once at startup rather than on every request
pipe = pipeline(task="text-generation", model=model, tokenizer=tokenizer, max_length=1000)

# Define a prediction function
def generate_article(title):
    prompt = f"Below is a title for an article. Write an article that appropriately suits the title: \n\n### Title:\n{title}\n\n### Article:\n"
    output = pipe(prompt)
    generated_article = output[0]["generated_text"]
    return generated_article

# Create a Gradio interface
iface = gr.Interface(
    fn=generate_article,
    inputs=gr.Textbox(lines=2, placeholder="Enter Article Title Here"),
    outputs="text",
    title="Article Generator",
    description="Enter a title to generate an article."
)

# Launch the app
iface.launch()
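# Note: iface.launch(share=True) would additionally create a temporary public URL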