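# Alpaca-LoRA movie-review sentiment demo: loads a LLaMA-7B base model,
# attaches a LoRA adapter, and serves it through a Gradio Blocks UI.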
import torch
from peft import PeftModel
import transformers
import gradio as gr
from typing import Iterable
from gradio.themes.base import Base
from gradio.themes.utils import colors, fonts, sizes

assert (
    "LlamaTokenizer" in transformers._import_structure["models.llama"]
), "LLaMA is now in HuggingFace's main branch.\nPlease reinstall it: pip uninstall transformers && pip install git+https://github.com/huggingface/transformers.git"

from transformers import LlamaTokenizer, LlamaForCausalLM, GenerationConfig
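# The LLaMA-7B base checkpoint and the LoRA adapter fine-tuned for
# movie-review sentiment.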
BASE_MODEL = "decapoda-research/llama-7b-hf"
LORA_WEIGHTS = "lifeofcoding/alpaca-lora-movie-review-sentiment"

tokenizer = LlamaTokenizer.from_pretrained(BASE_MODEL)
# Pick the best available device: CUDA, then Apple MPS, then CPU.
if torch.cuda.is_available():
    device = "cuda"
else:
    device = "cpu"

try:
    if torch.backends.mps.is_available():
        device = "mps"
except Exception:  # torch.backends.mps does not exist on older PyTorch builds
    pass
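# Load the base model and attach the LoRA adapter. Precision and device
# placement differ per backend: fp16 on CUDA/MPS, fp32 on CPU.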
if device == "cuda": | |
model = LlamaForCausalLM.from_pretrained( | |
BASE_MODEL, | |
load_in_8bit=False, | |
torch_dtype=torch.float16, | |
device_map="auto", | |
) | |
model = PeftModel.from_pretrained( | |
model, LORA_WEIGHTS, torch_dtype=torch.float16, force_download=True | |
) | |
elif device == "mps": | |
model = LlamaForCausalLM.from_pretrained( | |
BASE_MODEL, | |
device_map={"": device}, | |
torch_dtype=torch.float16, | |
) | |
model = PeftModel.from_pretrained( | |
model, | |
LORA_WEIGHTS, | |
device_map={"": device}, | |
torch_dtype=torch.float16, | |
) | |
else: | |
model = LlamaForCausalLM.from_pretrained( | |
BASE_MODEL, device_map={"": device}, low_cpu_mem_usage=True | |
) | |
model = PeftModel.from_pretrained( | |
model, | |
LORA_WEIGHTS, | |
device_map={"": device}, | |
) | |
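# Alpaca-style prompt template: an instruction, an optional input for extra
# context, and a "### Response:" marker that the model is asked to complete.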
def generate_prompt(instruction, input=None):
    if input:
        return f"""Below is an instruction that describes a task, paired with an input that provides further context. Write a response that appropriately completes the request.
### Instruction:
{instruction}
### Input:
{input}
### Response:"""
    else:
        return f"""Below is an instruction that describes a task. Write a response that appropriately completes the request.
### Instruction:
{instruction}
### Response:"""
if device != "cpu": | |
model.half() | |
model.eval() | |
if torch.__version__ >= "2": | |
model = torch.compile(model) | |
def evaluate(
    instruction,
    input=None,
    temperature=0.1,
    top_p=0.75,
    top_k=40,
    num_beams=4,
    max_new_tokens=128,
    **kwargs,
):
    """Build an Alpaca prompt, generate with beam search, and return the text after '### Response:'."""
    prompt = generate_prompt(instruction, input)
    inputs = tokenizer(prompt, return_tensors="pt")
    input_ids = inputs["input_ids"].to(device)
    generation_config = GenerationConfig(
        temperature=temperature,
        top_p=top_p,
        top_k=top_k,
        num_beams=num_beams,
        **kwargs,
    )
    with torch.no_grad():
        generation_output = model.generate(
            input_ids=input_ids,
            generation_config=generation_config,
            return_dict_in_generate=True,
            output_scores=True,
            max_new_tokens=max_new_tokens,
        )
    s = generation_output.sequences[0]
    output = tokenizer.decode(s)
    # The decoded sequence contains the prompt; keep only the generated answer.
    return output.split("### Response:")[1].strip()
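# A quick smoke test (hypothetical inputs; the exact output depends on the
# adapter weights):
# print(evaluate("Classify the sentiment of this movie review.",
#                input="A dazzling, heartfelt film from start to finish."))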
# Note: this standalone template is currently unused; prompts are built by
# generate_prompt above.
ins = '''Below is an instruction that describes a task. Write a response that appropriately completes the request.
### Instruction:
{}
### Response:
'''
# Note: this Monochrome theme is defined but never applied; the Blocks app
# below uses the SeafoamCustom theme instead.
theme = gr.themes.Monochrome(
    primary_hue="indigo",
    secondary_hue="blue",
    neutral_hue="slate",
    radius_size=gr.themes.sizes.radius_sm,
    font=[gr.themes.GoogleFont("Open Sans"), "ui-sans-serif", "system-ui", "sans-serif"],
)
examples = [
    "Instead of making a peanut butter and jelly sandwich, what else could I combine peanut butter with in a sandwich? Give five ideas",
    "How do I make a campfire?",
    "Explain to me the difference between nuclear fission and fusion.",
    "Write an ad to sell a Nikon D750.",
]
def process_example(args):
    # evaluate() returns a plain string; iterating over it would yield single
    # characters, so return the result directly.
    return evaluate(args)
css = ".generating {visibility: hidden}" | |
# Based on the gradio theming guide and borrowed from https://huggingface.co/spaces/shivi/dolly-v2-demo | |
class SeafoamCustom(Base):
    def __init__(
        self,
        *,
        primary_hue: colors.Color | str = colors.emerald,
        secondary_hue: colors.Color | str = colors.blue,
        neutral_hue: colors.Color | str = colors.blue,
        spacing_size: sizes.Size | str = sizes.spacing_md,
        radius_size: sizes.Size | str = sizes.radius_md,
        font: fonts.Font
        | str
        | Iterable[fonts.Font | str] = (
            fonts.GoogleFont("Quicksand"),
            "ui-sans-serif",
            "sans-serif",
        ),
        font_mono: fonts.Font
        | str
        | Iterable[fonts.Font | str] = (
            fonts.GoogleFont("IBM Plex Mono"),
            "ui-monospace",
            "monospace",
        ),
    ):
        super().__init__(
            primary_hue=primary_hue,
            secondary_hue=secondary_hue,
            neutral_hue=neutral_hue,
            spacing_size=spacing_size,
            radius_size=radius_size,
            font=font,
            font_mono=font_mono,
        )
        super().set(
            button_primary_background_fill="linear-gradient(90deg, *primary_300, *secondary_400)",
            button_primary_background_fill_hover="linear-gradient(90deg, *primary_200, *secondary_300)",
            button_primary_text_color="white",
            button_primary_background_fill_dark="linear-gradient(90deg, *primary_600, *secondary_800)",
            block_shadow="*shadow_drop_lg",
            button_shadow="*shadow_drop_lg",
            input_background_fill="zinc",
            input_border_color="*secondary_300",
            input_shadow="*shadow_drop",
            input_shadow_focus="*shadow_drop_lg",
        )


seafoam = SeafoamCustom()
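# Build the Gradio Blocks UI: a question box, an answer panel, example prompts,
# and a Generate button wired to evaluate().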
with gr.Blocks(theme=seafoam, analytics_enabled=False, css=css) as demo:
    with gr.Column():
        gr.Markdown(
            """ ## Alpaca-LoRA
            LLaMA-7B with a movie-review sentiment LoRA adapter
            Type in the box below and click the button to generate answers to your most pressing questions!
            """
        )
        with gr.Row():
            with gr.Column(scale=3):
                instruction = gr.Textbox(placeholder="Enter your question here", label="Question", elem_id="q-input")
                with gr.Box():
                    gr.Markdown("**Answer**")
                    output = gr.Markdown(elem_id="q-output")
                submit = gr.Button("Generate", variant="primary")
                gr.Examples(
                    examples=examples,
                    inputs=[instruction],
                    cache_examples=False,
                    fn=process_example,
                    outputs=[output],
                )
    submit.click(evaluate, inputs=[instruction], outputs=[output])
    instruction.submit(evaluate, inputs=[instruction], outputs=[output])

demo.queue(concurrency_count=1).launch(debug=True)
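# Gradio 3.x note: queue(concurrency_count=1) serializes requests so the single
# model instance handles one generation at a time; debug=True streams logs.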