update.py
app.py CHANGED
@@ -8,41 +8,34 @@ logging.basicConfig(level=logging.DEBUG,
                     format='%(asctime)s - %(levelname)s - %(message)s')
 logger = logging.getLogger(__name__)
 
-#
-
-    "google/flan-t5-xxl",    # Powerful instruction-following model
-    "bigscience/T0pp",       # Optimized for zero-shot tasks
-    "t5-large",              # General-purpose text generation
-    "google/flan-t5-large"   # Lightweight instruction-tuned model
-]
+# Model to use
+MODEL_NAME = "google/flan-t5-large"
 
 def load_model():
     """
-
+    Load the selected model and tokenizer using PyTorch.
     """
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-    logger.error("All model attempts failed. No model loaded.")
-    return None
+    try:
+        logger.info(f"Loading model: {MODEL_NAME} with PyTorch backend")
+
+        # Load the model and tokenizer
+        model = AutoModelForSeq2SeqLM.from_pretrained(MODEL_NAME)
+        tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
+
+        # Create the text generation pipeline
+        generator = pipeline(
+            "text2text-generation",
+            model=model,
+            tokenizer=tokenizer,
+            framework="pt",  # Specify PyTorch framework
+            max_length=512,
+            num_return_sequences=1
+        )
+        logger.info(f"Successfully loaded model: {MODEL_NAME}")
+        return generator
+    except Exception as e:
+        logger.error(f"Failed to load model {MODEL_NAME}: {e}")
+        return None
 
 # Load the generator at startup
 generator = load_model()
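For context, a minimal usage sketch of the generator that load_model() returns is shown below; the prompt text, the max_length override, and the surrounding caller code are illustrative assumptions rather than part of this commit. A text2text-generation pipeline returns a list of dicts keyed by "generated_text".

# Usage sketch (assumed caller code, not part of the commit above)
if generator is not None:
    prompt = "Summarize: Hugging Face Spaces can host small demo apps for ML models."
    outputs = generator(prompt, max_length=64)   # the pipeline returns a list of dicts
    print(outputs[0]["generated_text"])          # output key for text2text-generation
else:
    logger.error("Generator unavailable; see the model-loading logs above.")

Pinning a single MODEL_NAME keeps startup behaviour predictable; if the old fallback behaviour is ever wanted again, the same try/except block could be wrapped in a loop over several candidate model names.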