abdfajar707 committed
Commit dcb3707 · verified · 1 Parent(s): 528be03

Update app.py

Files changed (1):
  1. app.py +1 -0
app.py CHANGED
@@ -2,6 +2,7 @@ from app import FastLanguageModel
 import torch
 import gradio as gr
 
+#deklarasi
 max_seq_length = 2048 # Choose any! We auto support RoPE Scaling internally!
 dtype = None # None for auto detection. Float16 for Tesla T4, V100, Bfloat16 for Ampere+
 load_in_4bit = True # Use 4bit quantization to reduce memory usage. Can be False.
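
For context, max_seq_length, dtype, and load_in_4bit are the usual loading arguments for Unsloth's FastLanguageModel. The diff's "from app import FastLanguageModel" is kept verbatim above; the class normally comes from the unsloth package, which the sketch below assumes. This is a minimal sketch of how the rest of app.py presumably consumes these constants; the from_pretrained call and the model name are assumptions, not part of this commit.

# Minimal sketch (assumption): loading a model with the settings from the diff.
# The model id below is a placeholder and does not come from this commit.
from unsloth import FastLanguageModel
import torch

max_seq_length = 2048
dtype = None          # auto-detect; Float16 for T4/V100, Bfloat16 for Ampere+
load_in_4bit = True   # 4-bit quantization to reduce memory usage

model, tokenizer = FastLanguageModel.from_pretrained(
    model_name="unsloth/llama-3-8b-bnb-4bit",  # placeholder model id
    max_seq_length=max_seq_length,
    dtype=dtype,
    load_in_4bit=load_in_4bit,
)
FastLanguageModel.for_inference(model)  # switch to faster inference mode

The Gradio interface declared further down in app.py would then call this model's generate method for each user request.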