ejschwartz committed
Commit 7308fb9 · verified · 1 Parent(s): 72152eb

Update app.py

Files changed (1): app.py (+4 -4)
app.py CHANGED
@@ -3,14 +3,14 @@ import spaces
 import torch
 from transformers import AutoTokenizer, AutoModelForCausalLM
 
-model_path = 'LLM4Binary/llm4decompile-6.7b-v2' # V2 Model
+model_path = 'LLM4Binary/llm4decompile-9b-v2' # V2 Model
 tokenizer = AutoTokenizer.from_pretrained(model_path)
 model = AutoModelForCausalLM.from_pretrained(model_path, torch_dtype=torch.bfloat16).cuda()
 
-description = """
-# LLM4Decompile 6.7B V2
+description = f"""
+# LLM4Decompile 9B V2
 
-This is a space for testing the [LLM4Decompile 6.7B V2 model](https://huggingface.co/LLM4Binary/llm4decompile-6.7b-v2). It expects to be given a decompiled function output by Ghidra. I simply copy and paste from the Ghidra GUI, but this is not the method recommended by the official model page, which could influence performance. I am not affiliated with the authors.
+This is a space for testing the [LLM4Decompile 9B V2 model](https://huggingface.co/LLM4Binary/llm4decompile-9b-v2). It expects to be given a decompiled function output by Ghidra. I simply copy and paste from the Ghidra GUI, but this is not the method recommended by the official model page, which could influence performance. I am not affiliated with the authors.
 """
 
 @spaces.GPU(duration=120)
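For context, here is a minimal sketch of how the model loaded above might be invoked on Ghidra decompiler output, matching the copy-and-paste workflow the space's description mentions. The FUN_00101139 sample function, the use of raw pseudocode as the prompt, and the generation settings are all illustrative assumptions, not taken from app.py or the official model page:

import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

model_path = 'LLM4Binary/llm4decompile-9b-v2'
tokenizer = AutoTokenizer.from_pretrained(model_path)
model = AutoModelForCausalLM.from_pretrained(model_path, torch_dtype=torch.bfloat16).cuda()

# Hypothetical pseudocode pasted from the Ghidra GUI (illustrative only).
ghidra_output = """undefined8 FUN_00101139(int param_1)
{
  return (long)param_1 * (long)(param_1 + -1);
}"""

inputs = tokenizer(ghidra_output, return_tensors="pt").to(model.device)
with torch.no_grad():
    outputs = model.generate(**inputs, max_new_tokens=512)

# Decode only the newly generated tokens, skipping the prompt.
refined = tokenizer.decode(outputs[0][inputs["input_ids"].shape[1]:], skip_special_tokens=True)
print(refined)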