Commit 188010c · rollback
Parent(s): e158a1c
app.py CHANGED
@@ -1,5 +1,4 @@
 import os
-import time
 from flask import Flask, jsonify, request
 from flask_cors import CORS
 from transformers import AutoTokenizer, AutoModelForCausalLM, GenerationConfig
@@ -10,13 +9,11 @@ os.environ["HF_HOME"] = "/workspace/huggingface_cache" # Change this to a writa
 app = Flask(__name__)
 
 # Enable CORS for specific origins
-CORS(app, resources={r"…
+CORS(app, resources={r"api/predict/*": {"origins": ["http://localhost:3000", "https://main.dbn2ikif9ou3g.amplifyapp.com"]}})
 
 # Global variables for model and tokenizer
 model = None
 tokenizer = None
-last_loaded_time = 0
-COOLDOWN_PERIOD = 300 # Set your cooldown period to 5 minutes (300 seconds)
 
 def get_model_and_tokenizer(model_id):
     global model, tokenizer
@@ -26,29 +23,22 @@ def get_model_and_tokenizer(model_id):
         tokenizer = AutoTokenizer.from_pretrained(model_id, use_fast=False)
         tokenizer.pad_token = tokenizer.eos_token
 
-        print(f"Loading model for model_id: {model_id}")
+        print(f"Loading model and for model_id: {model_id}")
         # Load the model
-        model = AutoModelForCausalLM.from_pretrained(model_id)
+        model = AutoModelForCausalLM.from_pretrained(model_id) #, device_map="auto")
         model.config.use_cache = False
 
     except Exception as e:
         print(f"Error loading model: {e}")
 
-def is_model_loaded_and_fresh():
-    global last_loaded_time
-    current_time = time.time()
-    return model is not None and (current_time - last_loaded_time) < COOLDOWN_PERIOD
-
 def generate_response(user_input, model_id):
     prompt = formatted_prompt(user_input)
 
     global model, tokenizer
 
-    # …
-    if …
+    # Load the model and tokenizer if they are not already loaded or if the model_id has changed
+    if model is None or tokenizer is None or (model.config._name_or_path != model_id):
         get_model_and_tokenizer(model_id) # Load model and tokenizer
-        global last_loaded_time
-        last_loaded_time = time.time() # Update the last load time
 
     # Prepare the input tensors
     inputs = tokenizer(prompt, return_tensors="pt") # Move inputs to GPU if available
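
The restored CORS(...) line scopes cross-origin access to one route pattern for two explicit origins. In flask_cors, each key of the resources dict is a pattern matched against the request path, so such patterns conventionally begin with a leading slash; the committed r"api/predict/*" omits it. A minimal sketch of the same setup, with the slash added (that slash and the demo route are our assumptions, not part of this commit):

from flask import Flask, jsonify
from flask_cors import CORS

app = Flask(__name__)

# Keys of `resources` are path patterns; values set per-pattern CORS options.
# Leading slash added here; the committed pattern omits it (assumption).
CORS(app, resources={
    r"/api/predict/*": {
        "origins": [
            "http://localhost:3000",
            "https://main.dbn2ikif9ou3g.amplifyapp.com",
        ]
    }
})

# Hypothetical route, only to show where the CORS headers would apply.
@app.route("/api/predict/ping")
def ping():
    return jsonify(status="ok")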
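The guard restored in generate_response avoids reloading weights on every request: it reloads only when nothing is resident or a different checkpoint is requested, using config._name_or_path, the field where transformers records the checkpoint a model was loaded from. A self-contained sketch of that pattern (the ensure_model name is ours; the app's Flask routes and formatted_prompt helper are omitted):

from transformers import AutoModelForCausalLM, AutoTokenizer

model = None
tokenizer = None

def get_model_and_tokenizer(model_id):
    """Load (or replace) the process-wide model and tokenizer."""
    global model, tokenizer
    try:
        tokenizer = AutoTokenizer.from_pretrained(model_id, use_fast=False)
        tokenizer.pad_token = tokenizer.eos_token  # reuse EOS as the pad token
        model = AutoModelForCausalLM.from_pretrained(model_id)
        model.config.use_cache = False
    except Exception as e:
        print(f"Error loading model: {e}")

def ensure_model(model_id):
    # Reload only if nothing is loaded yet or a different checkpoint is
    # requested; transformers stores the source checkpoint in _name_or_path.
    if model is None or tokenizer is None or model.config._name_or_path != model_id:
        get_model_and_tokenizer(model_id)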