Spaces: Running on Zero
import gradio as gr
import spaces
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

model_path = 'LLM4Binary/llm4decompile-6.7b-v2'  # V2 model
tokenizer = AutoTokenizer.from_pretrained(model_path)
# Load the weights in bfloat16 and move them to the GPU.
model = AutoModelForCausalLM.from_pretrained(model_path, torch_dtype=torch.bfloat16).cuda()
@spaces.GPU  # Request a ZeroGPU device for the duration of each call.
def predict(input_asm):
    # Wrap the input in the prompt template the model was trained on.
    before = "# This is the assembly code:\n"
    after = "\n# What is the source code?\n"
    input_prompt = before + input_asm.strip() + after
    inputs = tokenizer(input_prompt, return_tensors="pt").to(model.device)
    with torch.no_grad():
        # The model's context length is 4096 tokens, so the prompt plus
        # max_new_tokens must stay within that limit.
        outputs = model.generate(**inputs, max_new_tokens=2048)
    # Decode only the newly generated tokens, dropping the final EOS token.
    c_func_decompile = tokenizer.decode(outputs[0][len(inputs[0]):-1])
    return c_func_decompile
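# The prompt above tells the model it is reading assembly, so the expected
# input is disassembled text rather than C source. A minimal sketch of
# producing such input with gcc and objdump via subprocess; the flags and the
# rough text filter below are illustrative assumptions, not the Space's own
# preprocessing.
import subprocess

def c_to_asm(c_path, func_name):
    # Compile unoptimized so the function is easy to locate in the listing.
    subprocess.run(["gcc", "-c", "-O0", "-o", "tmp.o", c_path], check=True)
    listing = subprocess.run(["objdump", "-d", "tmp.o"],
                             capture_output=True, text=True, check=True).stdout
    # Keep only the block for the requested function (a very rough filter).
    start = listing.find(f"<{func_name}>:")
    end = listing.find("\n\n", start)
    return listing[start:end] if start != -1 else listing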
demo = gr.Interface(fn=predict,
                    examples=["void ioabs_tcp_pre_select(connection c, int *n, struct pollfd *pfds) { struct ioabs_tcp *io; io = (struct ioabs_tcp*)c->io; c->s_index = *n; (*n)++; pfds[c->s_index].fd = c->s; pfds[c->s_index].events |= 0x0001; if (((size_t)(((c->wrb)->put + (c->wrb)->len - (c->wrb)->get) % (c->wrb)->len)) > 0) pfds[c->s_index].events |= 0x0004; }"],
                    inputs="text", outputs="text")
demo.queue()
demo.launch()
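
Once the app is up, it can also be queried from a separate process with
gradio_client. A minimal sketch, assuming a default local launch on port 7860;
replace the URL with the Space's own address when deployed:

from gradio_client import Client

client = Client("http://127.0.0.1:7860")  # assumed local address
result = client.predict("<assembly text here>", api_name="/predict")
print(result)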