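# Gradio demo for OdiaGenAI-LoRA: a LLaMA-7B base model combined with an
# Odia-instruction LoRA adapter (see the interface description below).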
import torch
import transformers
import gradio as gr
from peft import PeftModel

assert (
    "LlamaTokenizer" in transformers._import_structure["models.llama"]
), "LLaMA is now in HuggingFace's main branch.\nPlease reinstall it: pip uninstall transformers && pip install git+https://github.com/huggingface/transformers.git"
from transformers import LlamaTokenizer, LlamaForCausalLM, GenerationConfig

BASE_MODEL = "decapoda-research/llama-7b-hf"
LORA_WEIGHTS = "OdiaGenAI/odiagenAI-model-v0"

tokenizer = LlamaTokenizer.from_pretrained(BASE_MODEL)
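# Pick the best available device: CUDA GPU, Apple Silicon (MPS), or CPU.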
if torch.cuda.is_available():
    device = "cuda"
else:
    device = "cpu"
try:
    if torch.backends.mps.is_available():
        device = "mps"
except Exception:
    pass
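# Load the base LLaMA model and wrap it with the OdiaGenAI LoRA adapter.
# fp16 weights are used on GPU/MPS; the CPU path loads full precision with
# low_cpu_mem_usage to keep peak memory down.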
if device == "cuda":
    model = LlamaForCausalLM.from_pretrained(
        BASE_MODEL,
        load_in_8bit=False,
        torch_dtype=torch.float16,
        device_map="auto",
    )
    model = PeftModel.from_pretrained(
        model, LORA_WEIGHTS, torch_dtype=torch.float16, force_download=True
    )
elif device == "mps":
    model = LlamaForCausalLM.from_pretrained(
        BASE_MODEL,
        device_map={"": device},
        torch_dtype=torch.float16,
    )
    model = PeftModel.from_pretrained(
        model,
        LORA_WEIGHTS,
        device_map={"": device},
        torch_dtype=torch.float16,
    )
else:
    model = LlamaForCausalLM.from_pretrained(
        BASE_MODEL, device_map={"": device}, low_cpu_mem_usage=True
    )
    model = PeftModel.from_pretrained(
        model,
        LORA_WEIGHTS,
        device_map={"": device},
    )
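# Build an Alpaca-style prompt in Odia. The template translates roughly to
# "Below is an instruction that describes a task[, paired with an input that
# provides further context]. Write a response that appropriately completes the
# request.", followed by "### Instruction:", "### Input:" and "### Response:" headers.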
def generate_prompt(instruction, input=None):
    if input:
        return f"""ନିମ୍ନରେ ଏକ ନିର୍ଦ୍ଦେଶନାମା ଯାହାକି ଏକ କାର୍ଯ୍ୟକୁ ବର୍ଣ୍ଣନା କରେ, ଏକ ଇନପୁଟ୍ ସହିତ ଯୋଡି ଯାହା ପରବର୍ତ୍ତୀ ପ୍ରସଙ୍ଗ ପ୍ରଦାନ କରେ | ଏକ ପ୍ରତିକ୍ରିୟା ଲେଖନ୍ତୁ ଯାହା ଅନୁରୋଧକୁ ସଠିକ୍ ଭାବରେ ସମାପ୍ତ କରେ |
### ନିର୍ଦ୍ଦେଶ:
{instruction}
### ଇନପୁଟ୍:
{input}
### ପ୍ରତିକ୍ରିୟା:"""
    else:
        return f"""ନିମ୍ନରେ ଏକ ନିର୍ଦ୍ଦେଶ ଯାହାକି ଏକ କାର୍ଯ୍ୟକୁ ବର୍ଣ୍ଣନା କରେ | ଏକ ପ୍ରତିକ୍ରିୟା ଲେଖନ୍ତୁ ଯାହା ଅନୁରୋଧକୁ ସଠିକ୍ ଭାବରେ ସମାପ୍ତ କରେ |
### ନିର୍ଦ୍ଦେଶ:
{instruction}
### ପ୍ରତିକ୍ରିୟା:"""
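# Cast to fp16 and switch to inference mode; compile with PyTorch 2.x when available.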
if device != "cpu":
    model.half()
model.eval()
if torch.__version__ >= "2":
    model = torch.compile(model)
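# Generate a response for a single instruction/input pair and return only the
# text after the Odia "### Response:" header.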
def evaluate(
    instruction,
    input=None,
    temperature=0.1,
    top_p=0.75,
    top_k=40,
    num_beams=4,
    max_new_tokens=128,
    **kwargs,
):
    prompt = generate_prompt(instruction, input)
    print(prompt)
    inputs = tokenizer(prompt, return_tensors="pt")
    input_ids = inputs["input_ids"].to(device)
    generation_config = GenerationConfig(
        temperature=temperature,
        top_p=top_p,
        top_k=top_k,
        num_beams=num_beams,
        **kwargs,
    )
    with torch.no_grad():
        generation_output = model.generate(
            input_ids=input_ids,
            generation_config=generation_config,
            return_dict_in_generate=True,
            output_scores=True,
            max_new_tokens=max_new_tokens,
        )
    s = generation_output.sequences[0]
    output = tokenizer.decode(s)
    return output.split("### ପ୍ରତିକ୍ରିୟା:")[1].strip()
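# Gradio UI: instruction and optional input text boxes plus sampling controls,
# wired to evaluate() above.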
g = gr.Interface(
    fn=evaluate,
    inputs=[
        gr.components.Textbox(
            lines=2, label="Instruction", placeholder="Tell me about alpacas."
        ),
        gr.components.Textbox(lines=2, label="Input", placeholder="none"),
        gr.components.Slider(minimum=0, maximum=1, value=0.1, label="Temperature"),
        gr.components.Slider(minimum=0, maximum=1, value=0.75, label="Top p"),
        gr.components.Slider(minimum=0, maximum=100, step=1, value=40, label="Top k"),
        gr.components.Slider(minimum=1, maximum=4, step=1, value=4, label="Beams"),
        gr.components.Slider(
            minimum=1, maximum=512, step=1, value=128, label="Max tokens"
        ),
    ],
    outputs=[
        gr.components.Textbox(
            lines=5,
            label="Output",
        )
    ],
    title="🦙🌲 OdiaGenAI-LoRA",
    description="OdiaGenAI-LoRA is a 7B-parameter LLaMA model fine-tuned to follow Odia instructions. It is trained on an Odia translation of the [Stanford Alpaca](https://github.com/tatsu-lab/stanford_alpaca) dataset and uses the Hugging Face LLaMA implementation. For more information, please visit [the project's website](https://github.com/shantipriyap/OdiaGenAI).",
)
g.queue(concurrency_count=1)
g.launch()