import torch
import gradio as gr
from peft import PeftModel, PeftConfig
from transformers import AutoModelForCausalLM, AutoTokenizer

peft_model_id = "ambrosemcduffy/bloom-1b7-lora-ads"
config = PeftConfig.from_pretrained(peft_model_id)
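
# config.base_model_name_or_path records which base checkpoint the adapter
# was trained against, so the matching base model can be loaded below.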
base_model = AutoModelForCausalLM.from_pretrained(
    config.base_model_name_or_path,
    return_dict=True,
    load_in_8bit=False,
    device_map="auto",  # let accelerate place the model on the available device(s)
)
tokenizer = AutoTokenizer.from_pretrained(config.base_model_name_or_path)

# Attach the LoRA adapter weights to the base model.
model = PeftModel.from_pretrained(base_model, peft_model_id)


def make_inference(question):
    # Format the question with the prompt template.
    input_text = "### This is your question {}\n".format(question)
    # Move the tokenized inputs to the model's device; with device_map="auto"
    # the model may sit on a GPU while new tensors default to CPU.
    batch = tokenizer(input_text, return_tensors="pt").to(model.device)
    with torch.cuda.amp.autocast():
        output_tokens = model.generate(**batch, max_length=50, num_return_sequences=1)
    return tokenizer.decode(output_tokens[0], skip_special_tokens=True)


if __name__ == "__main__":
    # gr.Textbox replaces the deprecated gr.inputs / gr.outputs namespaces.
    gr.Interface(
        fn=make_inference,
        inputs=gr.Textbox(lines=2, label="Question"),
        outputs=gr.Textbox(label="Answer"),
        title="BlackQA",
        description="Generated text about Black heroes",
    ).launch()
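
# A quick way to sanity-check the model without launching the UI is to import
# this module and call make_inference directly (a minimal sketch; the question
# below is illustrative, not from the source):
#
#     python -c "import app; print(app.make_inference('Who was Bessie Coleman?'))"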