Runtime error
Update app.py
app.py CHANGED
@@ -17,7 +17,7 @@ import urllib.parse
 import http.client
 
 # Suppress warnings
-warnings.filterwarnings('ignore', category=UserWarning)
+warnings.filterwarnings('ignore', category=UserWarning)
 os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
 
 def initialize_environment():
@@ -52,7 +52,7 @@ logger = initialize_environment()
 
 class GitHubAPI:
     """GitHub API handler with rate limiting and error handling"""
-
+
     def __init__(self, token: str):
         self.token = token
         self.headers = {
@@ -113,9 +113,9 @@ class GitHubAPI:
 
 class GitHubBot:
     """Main GitHub bot implementation"""
-
+
     def __init__(self):
-        self.github_api = None
+        self.github_api = None
     def initialize_api(self, token: str):
         """Initialize GitHub API with token"""
         self.github_api = GitHubAPI(token)
@@ -395,11 +395,12 @@ if __name__ == "__main__":
         raise
     finally:
         logger.info("Application shutdown")
+
 from transformers import pipeline, AutoModelForSequenceClassification, AutoTokenizer
 
 class HuggingFaceModel:
     """Class to handle Hugging Face model loading and predictions"""
-
+
     def __init__(self, model_name: str):
         self.model_name = model_name
         self.model = None
@@ -496,103 +497,13 @@ def create_gradio_interface():
 
     return demo
 
-
-
-
-
-
-
-
-
-        self.tokenizer = None
-        self.load_model()
-
-    def load_model(self):
-        """Load the Hugging Face model and tokenizer"""
-        try:
-            self.tokenizer = AutoTokenizer.from_pretrained(self.model_name)
-            self.model = AutoModelForSequenceClassification.from_pretrained(self.model_name)
-            logger.info(f"Model {self.model_name} loaded successfully.")
-        except Exception as e:
-            logger.error(f"Error loading model {self.model_name}: {str(e)}")
-            raise
-
-    def predict(self, text: str) -> Dict:
-        """Make a prediction using the loaded model"""
-        try:
-            inputs = self.tokenizer(text, return_tensors="pt")
-            outputs = self.model(**inputs)
-            predictions = outputs.logits.argmax(dim=-1).item()
-            logger.info(f"Prediction made for input: {text} with result: {predictions}")
-            return {"prediction": predictions}
-        except Exception as e:
-            logger.error(f"Error making prediction: {str(e)}")
-            return {"error": str(e)}
-
-# Update the Gradio interface to include model loading and prediction
-def create_gradio_interface():
-    """Create and configure Gradio interface with custom styling"""
-    bot = GitHubBot()
-    hf_model = None  # Initialize Hugging Face model variable
-
-    with gr.Blocks(css=custom_css, theme=gr.themes.Base()) as demo:
-        # Existing Gradio components...
-
-        model_name = gr.Textbox(
-            label="Hugging Face Model Name",
-            placeholder="Enter the model name (e.g., 'distilbert-base-uncased')",
-            elem_classes="input input-bordered input-primary"
-        )
-
-        load_model_button = gr.Button(
-            "Load Model",
-            elem_classes="button button-primary"
-        )
-
-        prediction_text = gr.Textbox(
-            label="Input Text for Prediction",
-            placeholder="Enter text to classify...",
-            elem_classes="textarea textarea-primary"
-        )
-
-        predict_button = gr.Button(
-            "Make Prediction",
-            elem_classes="button button-success"
-        )
-
-        output_prediction = gr.Textbox(
-            label="Prediction Output",
-            interactive=False,
-            elem_classes="output-area"
-        )
-
-        # Load model handler
-        def load_model_handler(model_name_input):
-            nonlocal hf_model
-            try:
-                hf_model = HuggingFaceModel(model_name_input)
-                return f"Model {model_name_input} loaded successfully."
-            except Exception as e:
-                return f"Error loading model: {str(e)}"
-
-        # Prediction handler
-        def predict_handler(input_text):
-            if hf_model is None:
-                return "Model not loaded. Please load a model first."
-            result = hf_model.predict(input_text)
-            return result
-
-        # Connect components
-        load_model_button.click(
-            load_model_handler,
-            inputs=[model_name],
-            outputs=[output_prediction]
-        )
-
-        predict_button.click(
-            predict_handler,
-            inputs=[prediction_text],
-            outputs=[output_prediction]
-        )
+def initialize_zero_gpu():
+    """Initialize Hugging Face ZeroGPU"""
+    try:
+        import torch
+        torch.cuda.is_available()
+        print("Hugging Face ZeroGPU is not available. Using GPU instead.")
+    except Exception as e:
+        print(f"Error initializing Hugging Face ZeroGPU: {str(e)}")
 
-
+initialize_zero_gpu()
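The newly added initialize_zero_gpu() calls torch.cuda.is_available() but discards the returned boolean, so the "not available" message prints unconditionally and nothing is actually configured. A corrected sketch, assuming the goal is plain device detection (the device return value is illustrative, not part of the commit):

import torch

def initialize_zero_gpu():
    """Detect whether a CUDA device is visible and choose a device."""
    try:
        if torch.cuda.is_available():
            print("CUDA device detected; using GPU.")
            return torch.device("cuda")
        print("No CUDA device detected; falling back to CPU.")
        return torch.device("cpu")
    except Exception as e:
        print(f"Error during device detection: {str(e)}")
        return torch.device("cpu")

Note also that on ZeroGPU hardware, Spaces normally request the GPU by decorating the functions that need it with @spaces.GPU from the spaces package, rather than probing CUDA at import time; the module-level initialize_zero_gpu() call added here runs once, when the Space boots.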
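If the model-loading UI from the removed block is ever reintroduced, the wiring it used is standard Gradio: components created inside a gr.Blocks context, with handlers attached via .click(fn, inputs, outputs). A compressed sketch using the names from the removed code (it assumes the HuggingFaceModel class sketched above):

import gradio as gr

def create_gradio_interface():
    hf_model = None  # set by the load handler below

    with gr.Blocks() as demo:
        model_name = gr.Textbox(label="Hugging Face Model Name",
                                placeholder="e.g. 'distilbert-base-uncased'")
        load_model_button = gr.Button("Load Model")
        prediction_text = gr.Textbox(label="Input Text for Prediction")
        predict_button = gr.Button("Make Prediction")
        output_prediction = gr.Textbox(label="Prediction Output", interactive=False)

        def load_model_handler(name):
            nonlocal hf_model
            try:
                hf_model = HuggingFaceModel(name)
                return f"Model {name} loaded successfully."
            except Exception as e:
                return f"Error loading model: {str(e)}"

        def predict_handler(text):
            if hf_model is None:
                return "Model not loaded. Please load a model first."
            return str(hf_model.predict(text))

        # Each .click() call routes one button press through its handler.
        load_model_button.click(load_model_handler,
                                inputs=[model_name], outputs=[output_prediction])
        predict_button.click(predict_handler,
                             inputs=[prediction_text], outputs=[output_prediction])

    return demo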