Yehor committed
Commit aa8cda8 · verified · 1 Parent(s): 53fc999

Update app.py

Files changed (1)
  1. app.py +11 -2
app.py CHANGED
@@ -1,18 +1,27 @@
 import spaces
+
 import torch
+import torch._dynamo
+
 import gradio as gr
 
 from peft import AutoPeftModelForCausalLM
 from transformers import AutoTokenizer, BitsAndBytesConfig
 
+torch._dynamo.config.suppress_errors = True
+torch._dynamo.disable()
+
 max_seq_length = 2048
 dtype = (
     None
 )
 load_in_4bit = True
 
-quantization_config = BitsAndBytesConfig(load_in_4bit=True,
-                                         bnb_4bit_compute_dtype=torch.float16)
+
+quantization_config = BitsAndBytesConfig(
+    load_in_4bit=True,
+    # bnb_4bit_compute_dtype=torch.float16,
+)
 
 tokenizer = AutoTokenizer.from_pretrained("ua-l/gemma-2-9b-legal-uk")
 model = AutoPeftModelForCausalLM.from_pretrained(
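
The change imports torch._dynamo, suppresses its errors and disables it before any model code runs, and rewrites the BitsAndBytesConfig call to drop the explicit bnb_4bit_compute_dtype. A minimal sketch of the resulting loading path is below; the diff cuts off inside AutoPeftModelForCausalLM.from_pretrained(, so everything past that opening parenthesis (the repeated model id, the quantization_config wiring, device_map) is an illustrative assumption rather than part of the commit, and the Gradio/Spaces UI code is omitted.

import torch
import torch._dynamo

from peft import AutoPeftModelForCausalLM
from transformers import AutoTokenizer, BitsAndBytesConfig

# The commit disables TorchDynamo and suppresses its errors before the model is loaded.
torch._dynamo.config.suppress_errors = True
torch._dynamo.disable()

# 4-bit quantization; with bnb_4bit_compute_dtype commented out, bitsandbytes
# falls back to its default compute dtype (float32).
quantization_config = BitsAndBytesConfig(
    load_in_4bit=True,
)

tokenizer = AutoTokenizer.from_pretrained("ua-l/gemma-2-9b-legal-uk")
model = AutoPeftModelForCausalLM.from_pretrained(
    "ua-l/gemma-2-9b-legal-uk",               # assumption: the diff truncates this call here
    quantization_config=quantization_config,  # assumption: how the new config is passed in
    device_map="auto",                        # assumption: typical placement for 4-bit loading
)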