Sephfox committed on
Commit
45cd0a2
·
verified ·
1 Parent(s): d7afd24

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +4 -1
app.py CHANGED
@@ -17,6 +17,7 @@ from nltk.chunk import ne_chunk
17
  from textblob import TextBlob
18
  import matplotlib.pyplot as plt
19
  import seaborn as sns
 
20
 
21
  warnings.filterwarnings('ignore', category=FutureWarning, module='huggingface_hub.file_download')
22
 
@@ -62,7 +63,9 @@ emotion_prediction_tokenizer = AutoTokenizer.from_pretrained("bhadresh-savani/di
62
  # Load pre-trained large language model and tokenizer for response generation with increased context window
63
  response_model_name = "gpt2-xl"
64
  response_tokenizer = AutoTokenizer.from_pretrained(response_model_name)
65
- response_model = AutoModelForCausalLM.from_pretrained(response_model_name, torch_dtype=torch.float16, device_map="auto")
 
 
66
 
67
  # Set the pad token
68
  response_tokenizer.pad_token = response_tokenizer.eos_token
 
17
  from textblob import TextBlob
18
  import matplotlib.pyplot as plt
19
  import seaborn as sns
20
+ from accelerate import init_empty_weights, load_checkpoint_and_dispatch
21
 
22
  warnings.filterwarnings('ignore', category=FutureWarning, module='huggingface_hub.file_download')
23
 
 
63
  # Load pre-trained large language model and tokenizer for response generation with increased context window
64
  response_model_name = "gpt2-xl"
65
  response_tokenizer = AutoTokenizer.from_pretrained(response_model_name)
66
+ with init_empty_weights():
67
+ response_model = AutoModelForCausalLM.from_pretrained(response_model_name)
68
+ response_model = load_checkpoint_and_dispatch(response_model)
69
 
70
  # Set the pad token
71
  response_tokenizer.pad_token = response_tokenizer.eos_token