acecalisto3 committed
Commit 8d6a4c9 · verified · 1 Parent(s): ee34f26

Update app.py

Files changed (1):
  1. app.py +251 -30

app.py CHANGED
@@ -17,7 +17,7 @@ import urllib.parse
 import http.client
 
 # Suppress warnings
-warnings.filterwarnings('ignore', category='UserWarning')
+warnings.filterwarnings('ignore', category=UserWarning)  # Corrected here
 os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
 
 def initialize_environment():
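
The fix here is that warnings.filterwarnings expects category to be a Warning subclass, not a string naming one, so the old quoted 'UserWarning' fails at runtime. A minimal sketch of the corrected call:

import warnings

# category must be the Warning subclass itself, not the string 'UserWarning'
warnings.filterwarnings('ignore', category=UserWarning)
warnings.warn("this is now suppressed", UserWarning)
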
@@ -27,16 +27,17 @@ def initialize_environment():
     for directory in directories:
         os.makedirs(directory, exist_ok=True)
 
-    # Configure logging
+    # Configure logging
+    log_file = f"logs/github_bot_{datetime.now().strftime('%Y%m%d_%H%M%S')}.log"
     logging.basicConfig(
-        level=logging.DEBUG,
-        format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
-        handlers=[
-            logging.FileHandler(log_file),
-            logging.StreamHandler()
+        level=logging.INFO,
+        format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
+        handlers=[
+            logging.FileHandler(log_file),
+            logging.StreamHandler()
         ]
     )
-
+
     # Set up global exception handler
     def handle_exception(exc_type, exc_value, exc_traceback):
         if issubclass(exc_type, KeyboardInterrupt):
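
This hunk repairs two problems at once: the old format= line had no trailing comma before handlers= (a SyntaxError), and log_file was referenced without ever being defined. A self-contained sketch of the new setup (assuming app.py imports datetime as from datetime import datetime):

import logging
import os
from datetime import datetime

os.makedirs("logs", exist_ok=True)  # FileHandler needs the directory to exist
log_file = f"logs/github_bot_{datetime.now().strftime('%Y%m%d_%H%M%S')}.log"

logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
    handlers=[
        logging.FileHandler(log_file),  # one timestamped log file per run
        logging.StreamHandler()         # mirrored to the console
    ]
)
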
@@ -114,9 +115,7 @@ class GitHubBot:
     """Main GitHub bot implementation"""
 
     def __init__(self):
-        self.github_api = None
-
-    def initialize_api(self, token: str):
+        self.github_api = None def initialize_api(self, token: str):
         """Initialize GitHub API with token"""
         self.github_api = GitHubAPI(token)
 
@@ -143,13 +142,13 @@ class GitHubBot:
             f.write(f"# Resolution for Issue #{issue_number}\n\n{resolution}")
 
         # Clone the forked repo
-        subprocess.run(['git', '-C', '/tmp', 'clone', forked_repo])
+        subprocess.run(['git', 'clone', forked_repo, '/tmp/' + forked_repo.split('/')[-1]])
 
         # Change to the cloned directory
-        subprocess.run(['cd', '/tmp/' + forked_repo.split('/')[-1]])
+        os.chdir('/tmp/' + forked_repo.split('/')[-1])
 
         # Assuming manual intervention now
-        answer = input("Apply the fix manually and stage the changes (press ENTER)? ")
+        input("Apply the fix manually and stage the changes (press ENTER)? ")
 
         # Commit and push the modifications
         subprocess.run(['git', 'add', '.'])
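
The old subprocess.run(['cd', ...]) could never work: the directory change happens in a child process that exits immediately. The commit's os.chdir does work, but it moves the working directory of the whole bot process. A common alternative (a sketch, not what the commit does) scopes each git command to the clone with cwd= and fails fast with check=True:

import subprocess

forked_repo = "https://github.com/user/repo.git"  # placeholder URL for illustration
repo_dir = '/tmp/' + forked_repo.split('/')[-1]

subprocess.run(['git', 'clone', forked_repo, repo_dir], check=True)
subprocess.run(['git', 'add', '.'], cwd=repo_dir, check=True)  # runs inside the clone
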
@@ -234,17 +233,10 @@ custom_css = """
 }
 """
 
-def greet(name):
-    return f"Hello {name}!"
-
 def create_gradio_interface():
-    with gr.Blocks() as demo:
-        name = gr.Textbox(label="Name")
-        output = gr.Textbox(label="Output")
-        greet_btn = gr.Button("Greet")
-        greet_btn.click(fn=greet, inputs=name, outputs=output)
-    return demo
-
+    """Create and configure Gradio interface with custom styling"""
+    bot = GitHubBot()
+
     with gr.Blocks(css=custom_css, theme=gr.themes.Base()) as demo:
         gr.HTML("""
         <div class="container">
@@ -257,7 +249,7 @@ def create_gradio_interface():
                 label="GitHub Token",
                 placeholder="Enter your GitHub token",
                 type="password",
-                elem_classes=" input input-bordered input-primary"
+                elem_classes="input input-bordered input-primary"
             )
             github_username = gr.Textbox(
                 label="Repository Owner",
@@ -368,9 +360,238 @@ def signal_handler(signum, frame):
     cleanup()
     sys.exit(0)
 
-logger = logging.getLogger(__name__)
-
 if __name__ == "__main__":
-    demo = create_gradio_interface()
-    demo.launch(server_name="0.0.0.0", server_port=7860)
-    logger.info("Launch successful!")
+    # Register cleanup handlers
+    atexit.register(cleanup)
+    signal.signal(signal.SIGINT, signal_handler)
+    signal.signal(signal.SIGTERM, signal_handler)
+
+    try:
+        # Create and launch interface
+        demo = create_gradio_interface()
+
+        # Configure launch parameters
+        is_on_spaces = os.getenv("SPACE_ID") is not None
+        launch_kwargs = {
+            "server_name": "0.0.0.0",
+            "server_port": 7860,
+            "debug": True,
+        }
+
+        if not is_on_spaces:
+            launch_kwargs["share"] = True
+            logger.info("Running in local mode with public URL enabled")
+        else:
+            logger.info("Running on Hugging Face Spaces")
+
+        # Launch application
+        logger.info("Launching Gradio interface...")
+        demo = demo.queue()
+        demo.launch(**launch_kwargs)
+
+    except Exception as e:
+        logger.error(f"Error launching application: {str(e)}")
+        raise
+    finally:
+        logger.info("Application shutdown")
+from transformers import pipeline, AutoModelForSequenceClassification, AutoTokenizer
+
+class HuggingFaceModel:
+    """Class to handle Hugging Face model loading and predictions"""
+
+    def __init__(self, model_name: str):
+        self.model_name = model_name
+        self.model = None
+        self.tokenizer = None
+        self.load_model()
+
+    def load_model(self):
+        """Load the Hugging Face model and tokenizer"""
+        try:
+            self.tokenizer = AutoTokenizer.from_pretrained(self.model_name)
+            self.model = AutoModelForSequenceClassification.from_pretrained(self.model_name)
+            logger.info(f"Model {self.model_name} loaded successfully.")
+        except Exception as e:
+            logger.error(f"Error loading model {self.model_name}: {str(e)}")
+            raise
+
+    def predict(self, text: str) -> Dict:
+        """Make a prediction using the loaded model"""
+        try:
+            inputs = self.tokenizer(text, return_tensors="pt")
+            outputs = self.model(**inputs)
+            predictions = outputs.logits.argmax(dim=-1).item()
+            logger.info(f"Prediction made for input: {text} with result: {predictions}")
+            return {"prediction": predictions}
+        except Exception as e:
+            logger.error(f"Error making prediction: {str(e)}")
+            return {"error": str(e)}
+
+# Update the Gradio interface to include model loading and prediction
+def create_gradio_interface():
+    """Create and configure Gradio interface with custom styling"""
+    bot = GitHubBot()
+    hf_model = None  # Initialize Hugging Face model variable
+
+    with gr.Blocks(css=custom_css, theme=gr.themes.Base()) as demo:
+        # Existing Gradio components...
+
+        model_name = gr.Textbox(
+            label="Hugging Face Model Name",
+            placeholder="Enter the model name (e.g., 'distilbert-base-uncased')",
+            elem_classes="input input-bordered input-primary"
+        )
+
+        load_model_button = gr.Button(
+            "Load Model",
+            elem_classes="button button-primary"
+        )
+
+        prediction_text = gr.Textbox(
+            label="Input Text for Prediction",
+            placeholder="Enter text to classify...",
+            elem_classes="textarea textarea-primary"
+        )
+
+        predict_button = gr.Button(
+            "Make Prediction",
+            elem_classes="button button-success"
+        )
+
+        output_prediction = gr.Textbox(
+            label="Prediction Output",
+            interactive=False,
+            elem_classes="output-area"
+        )
+
+        # Load model handler
+        def load_model_handler(model_name_input):
+            nonlocal hf_model
+            try:
+                hf_model = HuggingFaceModel(model_name_input)
+                return f"Model {model_name_input} loaded successfully."
+            except Exception as e:
+                return f"Error loading model: {str(e)}"
+
+        # Prediction handler
+        def predict_handler(input_text):
+            if hf_model is None:
+                return "Model not loaded. Please load a model first."
+            result = hf_model.predict(input_text)
+            return result
+
+        # Connect components
+        load_model_button.click(
+            load_model_handler,
+            inputs=[model_name],
+            outputs=[output_prediction]
+        )
+
+        predict_button.click(
+            predict_handler,
+            inputs=[prediction_text],
+            outputs=[output_prediction]
+        )
+
+    return demo
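
One regression hides in this final hunk: the module-level logger = logging.getLogger(__name__) is deleted, yet the new __main__ block (and the HuggingFaceModel class after it) still call logger.info and logger.error. Unless logger is bound somewhere else in app.py, the first such call raises a NameError; keeping the one-liner at module level, before any handler runs, would restore it:

logger = logging.getLogger(__name__)  # module level, before any logger.* call
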
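The launch logic keys off SPACE_ID, an environment variable that Hugging Face Spaces sets for every running Space, and only requests a share link for local runs, since a Space already serves its own public URL. Condensed, the pattern looks like this:

import os

launch_kwargs = {"server_name": "0.0.0.0", "server_port": 7860, "debug": True}
if os.getenv("SPACE_ID") is None:     # not running on Hugging Face Spaces
    launch_kwargs["share"] = True     # tunnel a temporary public URL locally

demo.queue().launch(**launch_kwargs)  # demo as returned by create_gradio_interface()
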
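For context, the new HuggingFaceModel class wraps an AutoTokenizer/AutoModelForSequenceClassification pair and returns the argmax class index. Driven outside the UI it would look roughly like this (a sketch; the checkpoint name is an illustrative choice, and the index maps to a label via the model config):

model = HuggingFaceModel("distilbert-base-uncased-finetuned-sst-2-english")
result = model.predict("This bot fixed my issue overnight!")
print(result)                                              # e.g. {'prediction': 1}
print(model.model.config.id2label[result["prediction"]])   # e.g. 'POSITIVE'

Note also two design quirks: the loaded model lives in a single nonlocal hf_model variable, so every visitor to the Gradio app shares (and can overwrite) the same model, and predict runs the forward pass with gradient tracking enabled; wrapping it in torch.no_grad() would be the usual refinement.
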
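A final structural note: after this commit, app.py binds create_gradio_interface at module level more than once (the added block, from the transformers import through the new interface function, is even appended a second time, verbatim). Python raises no error for a repeated def; the definition executed last silently wins:

def f(): return "first"
def f(): return "second"
print(f())  # prints "second" - the later def rebinds the name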