Spaces:
Runtime error
Update app.py
app.py CHANGED
@@ -15,11 +15,26 @@ import subprocess
 import webbrowser
 import urllib.parse
 import http.client
+import torch
+import deepspeed  # Import DeepSpeed for ZeroGPU
 
 # Suppress warnings
-warnings.filterwarnings('ignore', category=UserWarning)
+warnings.filterwarnings('ignore', category=UserWarning)
 os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
 
+def initialize_zero_gpu():
+    """Initialize Hugging Face ZeroGPU"""
+    try:
+        # Check if CUDA is available
+        if torch.cuda.is_available():
+            # Initialize DeepSpeed with Zero Optimization
+            deepspeed.init_distributed()
+            print("ZeroGPU initialized successfully.")
+        else:
+            print("CUDA is not available. Using CPU instead.")
+    except Exception as e:
+        print(f"Error initializing Hugging Face ZeroGPU: {str(e)}")
+
 def initialize_environment():
     """Initialize application environment and configurations"""
    # Create necessary directories
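
Reviewer note: deepspeed.init_distributed() initializes DeepSpeed's distributed process group; it does not by itself enable Hugging Face ZeroGPU, which is requested per-function via the spaces package on ZeroGPU hardware. A minimal sketch of that pattern, assuming the Space actually runs on ZeroGPU (the function name classify is illustrative):

import spaces
import torch

@spaces.GPU  # a GPU slice is attached only while this function runs
def classify(text: str) -> str:
    # Inside the decorated function, CUDA should be available on ZeroGPU hardware.
    device = "cuda" if torch.cuda.is_available() else "cpu"
    return f"classified on {device}: {text}"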
@@ -116,7 +131,8 @@ class GitHubBot:
 
     def __init__(self):
         self.github_api = None
-
+
+    def initialize_api(self, token: str):
         """Initialize GitHub API with token"""
         self.github_api = GitHubAPI(token)
 
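
For reference, a hypothetical call sequence for the method added above; GitHubAPI is defined elsewhere in app.py, and sourcing the token from a GITHUB_TOKEN environment variable is an assumption:

import os

bot = GitHubBot()
bot.initialize_api(os.environ["GITHUB_TOKEN"])  # assumed token source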
@@ -362,6 +378,9 @@ def signal_handler(signum, frame):
     sys.exit(0)
 
 if __name__ == "__main__":
+    # Initialize ZeroGPU
+    initialize_zero_gpu()
+
     # Register cleanup handlers
     atexit.register(cleanup)
     signal.signal(signal.SIGINT, signal_handler)
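
The hunk above runs initialize_zero_gpu() before the cleanup handlers are registered. A self-contained sketch of that startup ordering; cleanup() here is a placeholder for the app's real teardown:

import atexit
import signal
import sys

def cleanup():
    print("cleanup complete")  # placeholder teardown

def signal_handler(signum, frame):
    sys.exit(0)  # raising SystemExit lets the atexit-registered cleanup run

if __name__ == "__main__":
    atexit.register(cleanup)                      # runs on normal interpreter exit
    signal.signal(signal.SIGINT, signal_handler)  # routes Ctrl+C through sys.exit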
@@ -394,116 +413,4 @@ if __name__ == "__main__":
         logger.error(f"Error launching application: {str(e)}")
         raise
     finally:
-        logger.info("Application shutdown")
+        logger.info("Application shutdown")
-
-from transformers import pipeline, AutoModelForSequenceClassification, AutoTokenizer
-
-class HuggingFaceModel:
-    """Class to handle Hugging Face model loading and predictions"""
-
-    def __init__(self, model_name: str):
-        self.model_name = model_name
-        self.model = None
-        self.tokenizer = None
-        self.load_model()
-
-    def load_model(self):
-        """Load the Hugging Face model and tokenizer"""
-        try:
-            self.tokenizer = AutoTokenizer.from_pretrained(self.model_name)
-            self.model = AutoModelForSequenceClassification.from_pretrained(self.model_name)
-            logger.info(f"Model {self.model_name} loaded successfully.")
-        except Exception as e:
-            logger.error(f"Error loading model {self.model_name}: {str(e)}")
-            raise
-
-    def predict(self, text: str) -> Dict:
-        """Make a prediction using the loaded model"""
-        try:
-            inputs = self.tokenizer(text, return_tensors="pt")
-            outputs = self.model(**inputs)
-            predictions = outputs.logits.argmax(dim=-1).item()
-            logger.info(f"Prediction made for input: {text} with result: {predictions}")
-            return {"prediction": predictions}
-        except Exception as e:
-            logger.error(f"Error making prediction: {str(e)}")
-            return {"error": str(e)}
-
-# Update the Gradio interface to include model loading and prediction
-def create_gradio_interface():
-    """Create and configure Gradio interface with custom styling"""
-    bot = GitHubBot()
-    hf_model = None  # Initialize Hugging Face model variable
-
-    with gr.Blocks(css=custom_css, theme=gr.themes.Base()) as demo:
-        # Existing Gradio components...
-
-        model_name = gr.Textbox(
-            label="Hugging Face Model Name",
-            placeholder="Enter the model name (e.g., 'distilbert-base-uncased')",
-            elem_classes="input input-bordered input-primary"
-        )
-
-        load_model_button = gr.Button(
-            "Load Model",
-            elem_classes="button button-primary"
-        )
-
-        prediction_text = gr.Textbox(
-            label="Input Text for Prediction",
-            placeholder="Enter text to classify...",
-            elem_classes="textarea textarea-primary"
-        )
-
-        predict_button = gr.Button(
-            "Make Prediction",
-            elem_classes="button button-success"
-        )
-
-        output_prediction = gr.Textbox(
-            label="Prediction Output",
-            interactive=False,
-            elem_classes="output-area"
-        )
-
-        # Load model handler
-        def load_model_handler(model_name_input):
-            nonlocal hf_model
-            try:
-                hf_model = HuggingFaceModel(model_name_input)
-                return f"Model {model_name_input} loaded successfully."
-            except Exception as e:
-                return f"Error loading model: {str(e)}"
-
-        # Prediction handler
-        def predict_handler(input_text):
-            if hf_model is None:
-                return "Model not loaded. Please load a model first."
-            result = hf_model.predict(input_text)
-            return result
-
-        # Connect components
-        load_model_button.click(
-            load_model_handler,
-            inputs=[model_name],
-            outputs=[output_prediction]
-        )
-
-        predict_button.click(
-            predict_handler,
-            inputs=[prediction_text],
-            outputs=[output_prediction]
-        )
-
-    return demo
-
-def initialize_zero_gpu():
-    """Initialize Hugging Face ZeroGPU"""
-    try:
-        import torch
-        torch.cuda.is_available()
-        print("Hugging Face ZeroGPU is not available. Using GPU instead.")
-    except Exception as e:
-        print(f"Error initializing Hugging Face ZeroGPU: {str(e)}")
-
-initialize_zero_gpu()
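
Reviewer note: this commit removes HuggingFaceModel, create_gradio_interface, and the old initialize_zero_gpu wholesale. Two hedged sketches in case the functionality is restored. First, predict() returned a bare class id; mapping it to a readable label via the model config is straightforward (the checkpoint name is an assumed example):

from transformers import AutoModelForSequenceClassification, AutoTokenizer

model_name = "distilbert-base-uncased-finetuned-sst-2-english"  # assumed example checkpoint
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

inputs = tokenizer("Great library!", return_tensors="pt")
pred_id = model(**inputs).logits.argmax(dim=-1).item()
print(model.config.id2label[pred_id])  # e.g. "POSITIVE"

Second, the minimal Blocks wiring the removed interface relied on, with an illustrative stub in place of the real prediction handler:

import gradio as gr

with gr.Blocks() as demo:
    prediction_text = gr.Textbox(label="Input Text for Prediction")
    predict_button = gr.Button("Make Prediction")
    output_prediction = gr.Textbox(label="Prediction Output", interactive=False)
    # click(fn, inputs, outputs) binds the handler to the button
    predict_button.click(lambda t: t.upper(), inputs=[prediction_text], outputs=[output_prediction])

if __name__ == "__main__":
    demo.launch()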