ejschwartz committed on
Commit
aa62017
·
verified ·
1 Parent(s): 858c1ba

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +7 -1
app.py CHANGED
@@ -3,10 +3,16 @@ import spaces
3
  import torch
4
  from transformers import AutoTokenizer, AutoModelForCausalLM
5
 
6
- model_path = 'LLM4Binary/llm4decompile-6.7b-v2' # V2 Model
7
  tokenizer = AutoTokenizer.from_pretrained(model_path)
8
  model = AutoModelForCausalLM.from_pretrained(model_path, torch_dtype=torch.bfloat16).cuda()
9
 
 
 
 
 
 
 
10
  @spaces.GPU
11
  def predict(input_asm):
12
  before = f"# This is the assembly code:\n"#prompt
 
3
  import torch
4
  from transformers import AutoTokenizer, AutoModelForCausalLM
5
 
6
+ model_path = 'LLM4Binary/llm4decompile-1.3b-v2' # V2 Model
7
  tokenizer = AutoTokenizer.from_pretrained(model_path)
8
  model = AutoModelForCausalLM.from_pretrained(model_path, torch_dtype=torch.bfloat16).cuda()
9
 
10
+ description = """
11
+ # LLM4Decompile 1.3B V2
12
+
13
+ This is a space for testing the [LLM4Decompile 1.3B V2 model](https://huggingface.co/LLM4Binary/llm4decompile-1.3b-v2). It expects to be given a decompiled function output by Ghidra. I simply copy and paste from the Ghidra GUI, but this is not the method recommended by the official model page, so YMMV.
14
+ """
15
+
16
  @spaces.GPU
17
  def predict(input_asm):
18
  before = f"# This is the assembly code:\n"#prompt