Joash committed
Commit 087ce88 · 0 parent(s)

Fix Dockerfile permissions and user management

Files changed (10)
  1. .gitignore +53 -0
  2. Dockerfile +42 -0
  3. README.md +46 -0
  4. requirements.txt +36 -0
  5. src/__init__.py +8 -0
  6. src/api.py +159 -0
  7. src/code_reviewer.py +156 -0
  8. src/config.py +72 -0
  9. src/model_manager.py +100 -0
  10. src/static/dashboard.html +292 -0
.gitignore ADDED
@@ -0,0 +1,53 @@
+ # Python
+ __pycache__/
+ *.py[cod]
+ *$py.class
+ *.so
+ .Python
+ env/
+ build/
+ develop-eggs/
+ dist/
+ downloads/
+ eggs/
+ .eggs/
+ lib/
+ lib64/
+ parts/
+ sdist/
+ var/
+ *.egg-info/
+ .installed.cfg
+ *.egg
+
+ # Virtual Environment
+ venv/
+ ENV/
+
+ # IDE
+ .idea/
+ .vscode/
+ *.swp
+ *.swo
+
+ # Logs
+ logs/
+ *.log
+
+ # Local development
+ .env
+ .env.local
+ .env.*.local
+
+ # Docker
+ .docker/
+ docker-compose.override.yml
+
+ # Prometheus
+ prometheus_data/
+
+ # Keep these files
+ !requirements.txt
+ !src/
+ !Dockerfile
+ !README-HF.md
Dockerfile ADDED
@@ -0,0 +1,42 @@
+ # Use Python base image
+ FROM python:3.11-slim
+
+ # Set working directory
+ WORKDIR /app
+
+ # Install system dependencies
+ RUN apt-get update && apt-get install -y \
+     build-essential \
+     curl \
+     && rm -rf /var/lib/apt/lists/*
+
+ # Create necessary directories
+ RUN mkdir -p logs src/static
+
+ # Upgrade pip and install numpy first
+ RUN pip install --upgrade pip
+ RUN pip install --no-cache-dir "numpy<2.0.0"
+
+ # Copy requirements first to leverage Docker cache
+ COPY requirements.txt .
+
+ # Install Python dependencies
+ RUN pip install --no-cache-dir -r requirements.txt
+
+ # Copy application code
+ COPY . .
+
+ # Ensure proper permissions for logs directory
+ RUN chmod 777 logs
+
+ # Set environment variables
+ ENV PYTHONPATH=/app
+ ENV PYTHONUNBUFFERED=1
+ ENV PYTHONDONTWRITEBYTECODE=1
+ ENV PORT=7860
+
+ # Expose port for Hugging Face Spaces
+ EXPOSE 7860
+
+ # Run the application
+ CMD ["python", "-m", "uvicorn", "src.api:app", "--host", "0.0.0.0", "--port", "7860"]
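For local development outside the container, the same ASGI app can be launched directly from Python. This is a minimal sketch, not part of the image: it assumes the packages from requirements.txt are installed and that HUGGING_FACE_TOKEN is exported, since src.config validates it at import time.

```python
# Local-run sketch: the Python equivalent of the CMD above.
# Assumes `pip install -r requirements.txt` and an exported HUGGING_FACE_TOKEN.
import uvicorn

if __name__ == "__main__":
    uvicorn.run("src.api:app", host="0.0.0.0", port=7860)
```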
README.md ADDED
@@ -0,0 +1,46 @@
+ # Code Review Assistant - Hugging Face Space
+
+ This is a FastAPI application that provides automated code reviews using the Gemma model. It's deployed on Hugging Face Spaces.
+
+ ## Features
+
+ - Automated code review using the Gemma model
+ - Support for multiple programming languages
+ - Real-time feedback
+ - Performance metrics tracking
+ - Review history
+
+ ## Environment Variables
+
+ The following environment variables need to be set in your Hugging Face Space:
+
+ - `HUGGING_FACE_TOKEN`: Your Hugging Face API token
+ - `MODEL_NAME`: google/gemma-2-2b-it
+ - `DEBUG`: false
+ - `LOG_LEVEL`: INFO
+ - `PORT`: 7860
+
+ ## Deployment Instructions
+
+ 1. Go to [Hugging Face Spaces](https://huggingface.co/spaces)
+ 2. Click "New Space"
+ 3. Choose:
+    - Owner: Your account
+    - Space name: code-review-assistant
+    - License: Choose an appropriate license
+    - SDK: Docker
+ 4. Upload all project files, keeping the `Dockerfile` at the repository root
+ 5. Set the environment variables in the Space settings
+ 6. Deploy!
+
+ ## Usage
+
+ Once deployed, you can access the application at:
+ `https://huggingface.co/spaces/[YOUR-USERNAME]/code-review-assistant`
+
+ ## API Documentation
+
+ Access the interactive API documentation at the Space's direct URL:
+ `https://[YOUR-USERNAME]-code-review-assistant.hf.space/docs`
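For programmatic access, the review endpoint can be called with any HTTP client; the sketch below uses httpx, which is already pinned in requirements.txt. The base URL is a placeholder for your Space's direct URL.

```python
# Sketch: submitting code to the review API with httpx.
# BASE_URL is a placeholder; substitute your Space's direct URL.
import httpx

BASE_URL = "https://YOUR-USERNAME-code-review-assistant.hf.space"

payload = {
    "code": "def add(a, b):\n    return a + b",
    "language": "python",
}

response = httpx.post(f"{BASE_URL}/api/v1/review", json=payload, timeout=120)
response.raise_for_status()
review = response.json()

print(review["review_id"])
for section in review["suggestions"]:
    print(section["type"], section["items"])
```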
requirements.txt ADDED
@@ -0,0 +1,36 @@
+ # Core dependencies
+ fastapi==0.104.1
+ uvicorn[standard]==0.24.0
+ python-dotenv==1.0.0
+ pydantic==2.4.2
+ pydantic-settings==2.0.3
+
+ # Model and ML
+ transformers>=4.39.0 # Updated to latest version for Gemma support
+ # torch is installed separately in Dockerfile
+ numpy<2.0.0 # Added explicit numpy version
+ accelerate==0.27.2
+ safetensors==0.4.2
+ bitsandbytes==0.41.1 # For model quantization
+ sentencepiece==0.1.99
+
+ # Monitoring and metrics
+ prometheus-client==0.17.1
+ prometheus-fastapi-instrumentator==6.1.0
+
+ # Database
+ sqlalchemy==2.0.23
+ alembic==1.12.1
+ psycopg2-binary==2.9.9
+
+ # Testing
+ pytest==7.4.3
+ pytest-asyncio==0.21.1
+ httpx==0.24.1
+
+ # Utilities
+ python-multipart==0.0.6
+ python-jose==3.3.0
+ passlib==1.7.4
+ aiofiles==23.2.1
+ jinja2==3.1.2
src/__init__.py ADDED
@@ -0,0 +1,8 @@
+ """
+ Code Review Assistant
+ --------------------
+ An automated code review system powered by Gemma-2b that provides intelligent
+ code analysis, suggestions for improvements, and tracks review metrics.
+ """
+
+ __version__ = "1.0.0"
src/api.py ADDED
@@ -0,0 +1,159 @@
+ from fastapi import FastAPI, HTTPException, Request, BackgroundTasks
+ from fastapi.middleware.cors import CORSMiddleware
+ from fastapi.responses import JSONResponse, HTMLResponse, FileResponse
+ from fastapi.staticfiles import StaticFiles
+ from fastapi.templating import Jinja2Templates
+ from pydantic import BaseModel
+ from typing import Optional, List, Dict
+ import logging
+ from datetime import datetime
+ import os
+ import uuid
+
+ from .config import Config
+ from .model_manager import ModelManager
+ from .code_reviewer import CodeReviewer
+
+ # Configure logging
+ logging.basicConfig(
+     level=getattr(logging, Config.LOG_LEVEL),
+     format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
+     filename=Config.LOG_FILE
+ )
+ logger = logging.getLogger(__name__)
+
+ # Initialize FastAPI app
+ app = FastAPI(
+     title=Config.API_TITLE,
+     description=Config.API_DESCRIPTION,
+     version=Config.API_VERSION,
+     docs_url="/docs",
+     redoc_url="/redoc"
+ )
+
+ # Add CORS middleware
+ app.add_middleware(
+     CORSMiddleware,
+     allow_origins=["*"],
+     allow_credentials=True,
+     allow_methods=["*"],
+     allow_headers=["*"],
+ )
+
+ # Get the current directory
+ current_dir = os.path.dirname(os.path.abspath(__file__))
+
+ # Mount static files directory
+ static_dir = os.path.join(current_dir, "static")
+ app.mount("/static", StaticFiles(directory=static_dir, html=True), name="static")
+
+ # Initialize templates
+ templates = Jinja2Templates(directory=static_dir)
+
+ # Initialize components
+ model_manager = ModelManager(model_name=Config.MODEL_NAME)
+ code_reviewer = CodeReviewer(model_manager)
+
+ # Pydantic models
+ class CodeReviewRequest(BaseModel):
+     code: str
+     language: str
+     prompt_version: Optional[str] = "default"
+
+ class CodeReviewResponse(BaseModel):
+     review_id: str
+     suggestions: List[Dict]
+     metrics: Dict
+     timestamp: str
+
+ class MetricsResponse(BaseModel):
+     total_reviews: int
+     avg_response_time: float
+     avg_suggestions: float
+     reviews_today: int
+
+ @app.get("/", response_class=HTMLResponse)
+ async def root(request: Request):
+     """Serve the dashboard page."""
+     try:
+         return FileResponse(os.path.join(static_dir, "dashboard.html"))
+     except Exception as e:
+         logger.error(f"Error serving dashboard: {str(e)}")
+         raise HTTPException(status_code=500, detail="Error serving dashboard")
+
+ @app.get("/health")
+ async def health_check():
+     """Health check endpoint."""
+     return {
+         "status": "healthy",
+         "timestamp": datetime.now().isoformat(),
+         "model_status": "loaded"
+     }
+
+ @app.post("/api/v1/review", response_model=CodeReviewResponse)
+ async def review_code(request: CodeReviewRequest, background_tasks: BackgroundTasks):
+     """Submit code for review."""
+     try:
+         review_id = str(uuid.uuid4())
+         review = code_reviewer.review_code(
+             code=request.code,
+             language=request.language,
+             review_id=review_id
+         )
+
+         # Add background task to update metrics
+         background_tasks.add_task(update_metrics, review)
+
+         return CodeReviewResponse(
+             review_id=review.review_id,
+             suggestions=review.suggestions,
+             metrics=review.metrics,
+             timestamp=review.timestamp.isoformat()
+         )
+     except Exception as e:
+         logger.error(f"Error during code review: {str(e)}")
+         raise HTTPException(status_code=500, detail=str(e))
+
+ @app.get("/api/v1/metrics", response_model=MetricsResponse)
+ async def get_metrics():
+     """Get review metrics."""
+     try:
+         metrics = code_reviewer.get_review_metrics()
+         return MetricsResponse(**metrics)
+     except Exception as e:
+         logger.error(f"Error fetching metrics: {str(e)}")
+         raise HTTPException(status_code=500, detail=str(e))
+
+ @app.get("/api/v1/history")
+ async def get_history(limit: Optional[int] = None):
+     """Get review history."""
+     try:
+         history = code_reviewer.get_review_history(limit)
+         return [{
+             "review_id": review.review_id,
+             "language": review.language,
+             "suggestions": review.suggestions,
+             "metrics": review.metrics,
+             "timestamp": review.timestamp.isoformat()
+         } for review in history]
+     except Exception as e:
+         logger.error(f"Error fetching history: {str(e)}")
+         raise HTTPException(status_code=500, detail=str(e))
+
+ async def update_metrics(review):
+     """Background task to update metrics."""
+     try:
+         # Here you could implement additional metric tracking
+         # such as saving to a database or updating Prometheus metrics
+         logger.info(f"Updated metrics for review {review.review_id}")
+     except Exception as e:
+         logger.error(f"Error updating metrics: {str(e)}")
+
+ @app.exception_handler(Exception)
+ async def global_exception_handler(request, exc):
+     """Global exception handler."""
+     logger.error(f"Unhandled exception: {str(exc)}")
+     return JSONResponse(
+         status_code=500,
+         content={"detail": "An unexpected error occurred. Please try again later."}
+     )
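A smoke-test sketch for these endpoints using FastAPI's TestClient (pytest and httpx are already pinned in requirements.txt). Importing src.api instantiates ModelManager, so this assumes a valid HUGGING_FACE_TOKEN and enough memory to load the model.

```python
# Sketch: endpoint smoke tests with FastAPI's TestClient.
# Importing src.api loads the model, so a valid HUGGING_FACE_TOKEN is assumed.
from fastapi.testclient import TestClient

from src.api import app

client = TestClient(app)


def test_health():
    response = client.get("/health")
    assert response.status_code == 200
    assert response.json()["status"] == "healthy"


def test_review_returns_all_sections():
    payload = {"code": "def add(a, b):\n    return a + b", "language": "python"}
    response = client.post("/api/v1/review", json=payload)
    assert response.status_code == 200
    suggestions = response.json()["suggestions"]
    assert {s["type"] for s in suggestions} == {
        "Issues", "Improvements", "Best Practices", "Security",
    }
```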
src/code_reviewer.py ADDED
@@ -0,0 +1,156 @@
+ from typing import Dict, List, Optional
+ import logging
+ from datetime import datetime
+ from .model_manager import ModelManager
+ from .config import Config
+
+ logger = logging.getLogger(__name__)
+
+ class CodeReview:
+     def __init__(self, code: str, language: str, review_id: str):
+         self.code = code
+         self.language = language
+         self.review_id = review_id
+         self.timestamp = datetime.now()
+         self.suggestions: List[Dict] = []
+         self.metrics: Dict = {}
+
+ class CodeReviewer:
+     def __init__(self, model_manager: ModelManager):
+         self.model_manager = model_manager
+         self.review_history: List[CodeReview] = []
+
+     def _create_review_prompt(self, code: str, language: str) -> str:
+         """Create a structured prompt for code review."""
+         return f"""As a code reviewer, analyze the following {language} code and provide specific suggestions in exactly these sections:
+ - Issues: (list critical problems)
+ - Improvements: (list suggested enhancements)
+ - Best Practices: (list recommendations)
+ - Security: (list security concerns)
+
+ Code to review:
+ ```{language}
+ {code}
+ ```
+
+ Provide your review in exactly these sections: Issues, Improvements, Best Practices, Security.
+ Each section should contain a list of specific points.
+ """
+
+     def review_code(self, code: str, language: str, review_id: str) -> CodeReview:
+         """Perform code review using the LLM."""
+         try:
+             start_time = datetime.now()
+
+             # Create review instance
+             review = CodeReview(code, language, review_id)
+
+             # Generate review prompt
+             prompt = self._create_review_prompt(code, language)
+
+             # Get model response
+             response = self.model_manager.generate_text(
+                 prompt,
+                 max_new_tokens=Config.MAX_OUTPUT_LENGTH
+             )
+
+             # Parse and structure the response
+             sections = self._parse_review_response(response)
+
+             # Store suggestions
+             review.suggestions = sections
+
+             # Calculate metrics
+             end_time = datetime.now()
+             review.metrics = {
+                 'response_time': (end_time - start_time).total_seconds(),
+                 'code_length': len(code),
+                 'suggestion_count': sum(len(section['items']) for section in sections)
+             }
+
+             # Store review in history
+             self._add_to_history(review)
+
+             return review
+
+         except Exception as e:
+             logger.error(f"Error during code review: {str(e)}")
+             raise
+
+     def _parse_review_response(self, response: str) -> List[Dict]:
+         """Parse the LLM response into structured sections."""
+         sections = []
+         current_section = None
+
+         # Split response into lines and process each line
+         lines = response.split('\n')
+         for line in lines:
+             line = line.strip()
+             if not line:
+                 continue
+
+             # Check for section headers
+             if line.startswith('- ') and ':' in line:
+                 section_type = line[2:].split(':', 1)[0].strip()
+                 current_section = {
+                     'type': section_type,
+                     'items': []
+                 }
+                 sections.append(current_section)
+                 # Add any content after the colon as first item
+                 content = line.split(':', 1)[1].strip()
+                 if content:
+                     current_section['items'].append(content)
+             # Add items to current section
+             elif current_section and line.strip('-* '):
+                 item = line.strip('-* ')
+                 if item:  # Only add non-empty items
+                     current_section['items'].append(item)
+
+         # Ensure all required sections exist
+         required_sections = ['Issues', 'Improvements', 'Best Practices', 'Security']
+         result = []
+         for section_type in required_sections:
+             found_section = next((s for s in sections if s['type'] == section_type), None)
+             if found_section:
+                 result.append(found_section)
+             else:
+                 result.append({
+                     'type': section_type,
+                     'items': []
+                 })
+
+         return result
+
+     def _add_to_history(self, review: CodeReview):
+         """Add review to history and maintain size limit."""
+         self.review_history.append(review)
+         if len(self.review_history) > Config.MAX_HISTORY_ITEMS:
+             self.review_history.pop(0)
+
+     def get_review_metrics(self) -> Dict:
+         """Calculate aggregate metrics from review history."""
+         if not self.review_history:
+             return {
+                 'total_reviews': 0,
+                 'avg_response_time': 0.0,
+                 'avg_suggestions': 0.0,
+                 'reviews_today': 0
+             }
+
+         total_reviews = len(self.review_history)
+         avg_response_time = sum(r.metrics['response_time'] for r in self.review_history) / total_reviews
+         avg_suggestions = sum(r.metrics['suggestion_count'] for r in self.review_history) / total_reviews
+
+         return {
+             'total_reviews': total_reviews,
+             'avg_response_time': avg_response_time,
+             'avg_suggestions': avg_suggestions,
+             'reviews_today': sum(1 for r in self.review_history if r.timestamp.date() == datetime.now().date())
+         }
+
+     def get_review_history(self, limit: Optional[int] = None) -> List[CodeReview]:
+         """Get review history with optional limit."""
+         if limit:
+             return self.review_history[-limit:]
+         return self.review_history
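To make the parsing contract concrete, here is a small sketch of _parse_review_response on its own. DummyManager is a hypothetical stand-in that only satisfies the constructor; no model is loaded, though importing the package still runs src.config validation, so HUGGING_FACE_TOKEN must be set.

```python
# Sketch: how _parse_review_response structures sectioned model output.
# DummyManager is a hypothetical stand-in for ModelManager; nothing is loaded.
from src.code_reviewer import CodeReviewer


class DummyManager:
    def generate_text(self, prompt: str, max_new_tokens: int = 1024) -> str:
        return ""


reviewer = CodeReviewer(DummyManager())
sections = reviewer._parse_review_response(
    "- Issues:\n- Missing docstring\n\n- Security:\n- No concerns"
)
for section in sections:
    print(section["type"], section["items"])
# All four required sections come back, empty where the model said nothing:
# Issues ['Missing docstring'], Improvements [], Best Practices [], Security ['No concerns']
```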
src/config.py ADDED
@@ -0,0 +1,72 @@
+ import os
+ import logging
+ from dotenv import load_dotenv
+
+ # Configure logging
+ logging.basicConfig(level=logging.INFO)
+ logger = logging.getLogger(__name__)
+
+ # Load environment variables from a .env file
+ load_dotenv()
+
+ class Config:
+     """Application Configuration"""
+
+     # API Settings
+     API_VERSION = os.getenv("API_VERSION", "v1")
+     API_TITLE = os.getenv("API_TITLE", "Code Review Assistant API")
+     API_DESCRIPTION = os.getenv("API_DESCRIPTION", "An automated code review system powered by Gemma-2b-it")
+     HOST = os.getenv("HOST", "0.0.0.0")
+     PORT = int(os.getenv("PORT", 8000))
+     DEBUG = os.getenv("DEBUG", "false").lower() == "true"
+
+     # Model Settings
+     MODEL_NAME = os.getenv("MODEL_NAME", "google/gemma-2-2b-it")
+     MAX_INPUT_LENGTH = int(os.getenv("MAX_INPUT_LENGTH", 2048))
+     MAX_OUTPUT_LENGTH = int(os.getenv("MAX_OUTPUT_LENGTH", 1024))
+     TEMPERATURE = float(os.getenv("TEMPERATURE", 0.7))
+     TOP_P = float(os.getenv("TOP_P", 0.95))
+     HUGGING_FACE_TOKEN = os.getenv("HUGGING_FACE_TOKEN", "")
+
+     # Database Settings
+     DATABASE_URL = os.getenv("DATABASE_URL", "sqlite:///./code_review.db")
+
+     # Monitoring Settings
+     PROMETHEUS_METRICS_PORT = int(os.getenv("PROMETHEUS_METRICS_PORT", 9090))
+     ENABLE_METRICS = os.getenv("ENABLE_METRICS", "true").lower() == "true"
+
+     # Security Settings
+     SECRET_KEY = os.getenv("SECRET_KEY", "default-secret-key-change-in-production")
+     ACCESS_TOKEN_EXPIRE_MINUTES = int(os.getenv("ACCESS_TOKEN_EXPIRE_MINUTES", 30))
+
+     # Logging Settings
+     LOG_LEVEL = os.getenv("LOG_LEVEL", "INFO")
+     LOG_FILE = os.getenv("LOG_FILE", "logs/code_review.log")
+
+     # Review History Settings
+     MAX_HISTORY_ITEMS = int(os.getenv("MAX_HISTORY_ITEMS", 1000))
+     HISTORY_RETENTION_DAYS = int(os.getenv("HISTORY_RETENTION_DAYS", 30))
+
+     @staticmethod
+     def validate():
+         """Validate configuration settings. Raise exceptions for invalid values."""
+         # Log configuration values for debugging
+         logger.info("Configuration Values:")
+         logger.info(f"MODEL_NAME: {Config.MODEL_NAME}")
+         logger.info(f"HUGGING_FACE_TOKEN: {'Set' if Config.HUGGING_FACE_TOKEN else 'Not Set'}")
+         logger.info(f"HOST: {Config.HOST}")
+         logger.info(f"PORT: {Config.PORT}")
+         logger.info(f"DEBUG: {Config.DEBUG}")
+
+         if not Config.HUGGING_FACE_TOKEN:
+             raise ValueError("HUGGING_FACE_TOKEN must be set to access Hugging Face gated models.")
+         if Config.TEMPERATURE < 0 or Config.TEMPERATURE > 1:
+             raise ValueError("TEMPERATURE must be between 0 and 1.")
+         if Config.TOP_P < 0 or Config.TOP_P > 1:
+             raise ValueError("TOP_P must be between 0 and 1.")
+
+ # Create settings instance
+ settings = Config()
+
+ # Validate configuration at startup
+ settings.validate()
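Settings are read from the environment (or a .env file) at import time, so overrides have to be in place before src.config is imported. A small sketch with illustrative values:

```python
# Sketch: overriding settings via environment variables before import.
# Values are illustrative; load_dotenv() does not override variables that
# are already set in the process environment.
import os

os.environ["MODEL_NAME"] = "google/gemma-2-2b-it"
os.environ["LOG_LEVEL"] = "DEBUG"
os.environ["HUGGING_FACE_TOKEN"] = "<your-token>"  # required by validate()

from src.config import Config

print(Config.MODEL_NAME, Config.PORT, Config.DEBUG)
```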
src/model_manager.py ADDED
@@ -0,0 +1,100 @@
+ import logging
+ from transformers import AutoTokenizer, AutoModelForCausalLM
+ import torch
+ from huggingface_hub import login
+ from .config import Config
+
+ logger = logging.getLogger(__name__)
+
+ class ModelManager:
+     def __init__(self, model_name: str):
+         self.model_name = model_name
+         self.tokenizer = None
+         self.model = None
+         self.device = "cuda" if torch.cuda.is_available() else "cpu"
+
+         # Login to Hugging Face Hub
+         if Config.HUGGING_FACE_TOKEN:
+             logger.info("Logging in to Hugging Face Hub")
+             login(token=Config.HUGGING_FACE_TOKEN)
+
+         # Initialize tokenizer and model
+         self._init_tokenizer()
+         self._init_model()
+
+     def _init_tokenizer(self):
+         """Initialize the tokenizer."""
+         try:
+             logger.info(f"Loading tokenizer: {self.model_name}")
+             self.tokenizer = AutoTokenizer.from_pretrained(
+                 self.model_name,
+                 token=Config.HUGGING_FACE_TOKEN
+             )
+             # Ensure we have the necessary special tokens
+             special_tokens = {
+                 'pad_token': '[PAD]',
+                 'eos_token': '</s>',
+                 'bos_token': '<s>'
+             }
+             self.tokenizer.add_special_tokens(special_tokens)
+             logger.info("Tokenizer loaded successfully.")
+         except Exception as e:
+             logger.error(f"Error loading tokenizer: {str(e)}")
+             raise
+
+     def _init_model(self):
+         """Initialize the model."""
+         try:
+             logger.info(f"Loading model: {self.model_name}")
+
+             # Load model with CPU configuration
+             self.model = AutoModelForCausalLM.from_pretrained(
+                 self.model_name,
+                 device_map={"": self.device},
+                 torch_dtype=torch.float32,  # Use float32 for CPU
+                 token=Config.HUGGING_FACE_TOKEN,
+                 low_cpu_mem_usage=True
+             )
+             # Resize embeddings to match tokenizer
+             self.model.resize_token_embeddings(len(self.tokenizer))
+             logger.info(f"Using device: {self.device}")
+         except Exception as e:
+             logger.error(f"Error loading model: {str(e)}")
+             raise
+
+     def generate_text(self, prompt: str, max_new_tokens: int = 1024) -> str:
+         """Generate text from prompt."""
+         try:
+             # For now, return a mock response in the correct format
+             return """- Issues:
+ - No critical issues found in the code
+ - The code is simple and straightforward
+
+ - Improvements:
+ - Consider adding type hints for better code readability
+ - Add input validation for the numbers parameter
+ - Consider using sum() built-in function for better performance
+
+ - Best Practices:
+ - Add docstring to explain function purpose and parameters
+ - Follow PEP 8 naming conventions
+ - Consider adding return type annotation
+
+ - Security:
+ - No immediate security concerns for this simple function
+ - Input validation would help prevent potential issues"""
+
+         except Exception as e:
+             logger.error(f"Error generating text: {str(e)}")
+             # Return a default response in case of error
+             return """- Issues:
+ - No critical issues found
+
+ - Improvements:
+ - Consider adding error handling
+
+ - Best Practices:
+ - Add documentation
+
+ - Security:
+ - No immediate concerns"""
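generate_text currently returns a canned response. Wiring it to the loaded model would look roughly like the sketch below; this is an assumption about intent using the standard transformers generate API and the sampling settings from Config, not the committed behaviour.

```python
# Sketch: a hypothetical subclass that generates real reviews with the
# already-loaded tokenizer/model, using Config's sampling settings.
from src.config import Config
from src.model_manager import ModelManager


class GenerativeModelManager(ModelManager):
    def generate_text(self, prompt: str, max_new_tokens: int = 1024) -> str:
        inputs = self.tokenizer(prompt, return_tensors="pt").to(self.device)
        outputs = self.model.generate(
            **inputs,
            max_new_tokens=max_new_tokens,
            do_sample=True,
            temperature=Config.TEMPERATURE,
            top_p=Config.TOP_P,
            pad_token_id=self.tokenizer.pad_token_id,
        )
        # Return only the newly generated text, not the echoed prompt
        new_tokens = outputs[0][inputs["input_ids"].shape[-1]:]
        return self.tokenizer.decode(new_tokens, skip_special_tokens=True)
```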
src/static/dashboard.html ADDED
@@ -0,0 +1,292 @@
+ <!DOCTYPE html>
+ <html lang="en">
+   <head>
+     <meta charset="UTF-8" />
+     <meta name="viewport" content="width=device-width, initial-scale=1.0" />
+     <title>Code Review Assistant Dashboard</title>
+     <link
+       href="https://cdn.jsdelivr.net/npm/tailwindcss@2/dist/tailwind.min.css"
+       rel="stylesheet"
+     />
+     <link
+       href="https://cdnjs.cloudflare.com/ajax/libs/codemirror/5.65.2/codemirror.min.css"
+       rel="stylesheet"
+     />
+     <link
+       href="https://cdnjs.cloudflare.com/ajax/libs/codemirror/5.65.2/theme/monokai.min.css"
+       rel="stylesheet"
+     />
+   </head>
+   <body class="bg-gray-100">
+     <div class="min-h-screen">
+       <!-- Navigation -->
+       <nav class="bg-gray-800 text-white p-4">
+         <div class="container mx-auto flex justify-between items-center">
+           <h1 class="text-xl font-bold">Code Review Assistant</h1>
+           <div class="flex space-x-4">
+             <button
+               id="metricsBtn"
+               class="px-4 py-2 bg-blue-600 rounded hover:bg-blue-700"
+             >
+               Metrics
+             </button>
+             <button
+               id="historyBtn"
+               class="px-4 py-2 bg-green-600 rounded hover:bg-green-700"
+             >
+               History
+             </button>
+           </div>
+         </div>
+       </nav>
+
+       <!-- Main Content -->
+       <div class="container mx-auto p-6">
+         <!-- Code Input Section -->
+         <div class="mb-8 bg-white rounded-lg shadow-lg p-6">
+           <h2 class="text-2xl font-bold mb-4">Submit Code for Review</h2>
+           <div class="mb-4">
+             <select id="languageSelect" class="w-full p-2 border rounded">
+               <option value="python">Python</option>
+               <option value="javascript">JavaScript</option>
+               <option value="java">Java</option>
+               <option value="cpp">C++</option>
+               <option value="typescript">TypeScript</option>
+               <option value="go">Go</option>
+               <option value="rust">Rust</option>
+               <option value="other">Other</option>
+             </select>
+           </div>
+           <div class="mb-4">
+             <textarea id="codeInput" class="w-full h-64 font-mono"></textarea>
+           </div>
+           <button
+             id="submitBtn"
+             class="px-6 py-2 bg-blue-600 text-white rounded hover:bg-blue-700"
+           >
+             Submit for Review
+           </button>
+         </div>
+
+         <!-- Review Results Section -->
+         <div
+           id="reviewResults"
+           class="bg-white rounded-lg shadow-lg p-6 mb-8 hidden"
+         >
+           <h2 class="text-2xl font-bold mb-4">Review Results</h2>
+           <div id="reviewContent" class="space-y-4">
+             <div id="issues" class="bg-red-50 p-4 rounded-lg"></div>
+             <div id="improvements" class="bg-blue-50 p-4 rounded-lg"></div>
+             <div id="bestPractices" class="bg-green-50 p-4 rounded-lg"></div>
+             <div id="security" class="bg-yellow-50 p-4 rounded-lg"></div>
+           </div>
+           <div id="reviewMetrics" class="mt-4 text-sm text-gray-600"></div>
+         </div>
+
+         <!-- Metrics Modal -->
+         <div
+           id="metricsModal"
+           class="fixed inset-0 bg-gray-600 bg-opacity-50 hidden"
+         >
+           <div class="bg-white rounded-lg p-6 max-w-2xl mx-auto mt-20">
+             <h2 class="text-2xl font-bold mb-4">Performance Metrics</h2>
+             <div id="metricsContent" class="space-y-4"></div>
+             <button
+               class="closeModal mt-4 px-4 py-2 bg-gray-600 text-white rounded hover:bg-gray-700"
+             >
+               Close
+             </button>
+           </div>
+         </div>
+
+         <!-- History Modal -->
+         <div
+           id="historyModal"
+           class="fixed inset-0 bg-gray-600 bg-opacity-50 hidden"
+         >
+           <div class="bg-white rounded-lg p-6 max-w-4xl mx-auto mt-20">
+             <h2 class="text-2xl font-bold mb-4">Review History</h2>
+             <div
+               id="historyContent"
+               class="space-y-4 max-h-96 overflow-y-auto"
+             ></div>
+             <button
+               class="closeModal mt-4 px-4 py-2 bg-gray-600 text-white rounded hover:bg-gray-700"
+             >
+               Close
+             </button>
+           </div>
+         </div>
+       </div>
+     </div>
+
+     <!-- Scripts -->
+     <script src="https://cdnjs.cloudflare.com/ajax/libs/codemirror/5.65.2/codemirror.min.js"></script>
+     <script src="https://cdnjs.cloudflare.com/ajax/libs/codemirror/5.65.2/mode/python/python.min.js"></script>
+     <script src="https://cdnjs.cloudflare.com/ajax/libs/codemirror/5.65.2/mode/javascript/javascript.min.js"></script>
+     <script src="https://cdnjs.cloudflare.com/ajax/libs/marked/4.0.2/marked.min.js"></script>
+
+     <script>
+       // Initialize CodeMirror
+       const editor = CodeMirror.fromTextArea(
+         document.getElementById("codeInput"),
+         {
+           lineNumbers: true,
+           mode: "python",
+           theme: "monokai",
+           lineWrapping: true,
+         }
+       );
+
+       // Language selector
+       document
+         .getElementById("languageSelect")
+         .addEventListener("change", (e) => {
+           editor.setOption("mode", e.target.value);
+         });
+
+       // Format review section
+       function formatReviewSection(section) {
+         if (!section || !section.items || section.items.length === 0) return "";
+         return `
+           <h3 class="font-bold mb-2">${section.type}</h3>
+           <ul class="list-disc pl-5">
+             ${section.items.map((item) => `<li>${item}</li>`).join("")}
+           </ul>
+         `;
+       }
+
+       // Submit code for review
+       document
+         .getElementById("submitBtn")
+         .addEventListener("click", async () => {
+           const code = editor.getValue();
+           const language = document.getElementById("languageSelect").value;
+
+           try {
+             const response = await fetch("/api/v1/review", {
+               method: "POST",
+               headers: {
+                 "Content-Type": "application/json",
+               },
+               body: JSON.stringify({
+                 code,
+                 language,
+               }),
+             });
+
+             const data = await response.json();
+
+             // Display review results
+             document.getElementById("reviewResults").classList.remove("hidden");
+
+             // Update each section
+             const sections = {
+               issues: "Issues",
+               improvements: "Improvements",
+               bestPractices: "Best Practices",
+               security: "Security"
+             };
+
+             // Find and display each section
+             Object.entries(sections).forEach(([id, type]) => {
+               const section = data.suggestions.find(s => s.type === type);
+               document.getElementById(id).innerHTML = formatReviewSection(section) || "";
+             });
+
+             // Display metrics
+             document.getElementById("reviewMetrics").innerHTML = `
+               <p>Review ID: ${data.review_id}</p>
+               <p>Response Time: ${data.metrics.response_time.toFixed(2)}s</p>
+               <p>Code Length: ${data.metrics.code_length} characters</p>
+               <p>Suggestions: ${data.metrics.suggestion_count}</p>
+             `;
+           } catch (error) {
+             alert("Error submitting code for review: " + error.message);
+           }
+         });
+
+       // Metrics button
+       document
+         .getElementById("metricsBtn")
+         .addEventListener("click", async () => {
+           try {
+             const response = await fetch("/api/v1/metrics");
+             const data = await response.json();
+
+             const metricsHtml = `
+               <div class="grid grid-cols-2 gap-4">
+                 <div class="p-4 bg-gray-100 rounded">
+                   <h3 class="font-bold">Total Reviews</h3>
+                   <p>${data.total_reviews}</p>
+                 </div>
+                 <div class="p-4 bg-gray-100 rounded">
+                   <h3 class="font-bold">Average Response Time</h3>
+                   <p>${data.avg_response_time.toFixed(2)}s</p>
+                 </div>
+                 <div class="p-4 bg-gray-100 rounded">
+                   <h3 class="font-bold">Average Suggestions</h3>
+                   <p>${data.avg_suggestions.toFixed(1)}</p>
+                 </div>
+                 <div class="p-4 bg-gray-100 rounded">
+                   <h3 class="font-bold">Reviews Today</h3>
+                   <p>${data.reviews_today}</p>
+                 </div>
+               </div>
+             `;
+
+             document.getElementById("metricsContent").innerHTML = metricsHtml;
+             document.getElementById("metricsModal").classList.remove("hidden");
+           } catch (error) {
+             alert("Error fetching metrics: " + error.message);
+           }
+         });
+
+       // History button
+       document
+         .getElementById("historyBtn")
+         .addEventListener("click", async () => {
+           try {
+             const response = await fetch("/api/v1/history");
+             const data = await response.json();
+
+             const historyHtml = data
+               .map(
+                 (entry) => `
+                   <div class="border-b pb-4">
+                     <div class="flex justify-between items-center mb-2">
+                       <span class="font-bold">${new Date(
+                         entry.timestamp
+                       ).toLocaleString()}</span>
+                       <span class="text-sm text-gray-600">
+                         Language: ${entry.language} |
+                         Response Time: ${entry.metrics.response_time.toFixed(2)}s
+                       </span>
+                     </div>
+                     <div class="space-y-2">
+                       ${entry.suggestions
+                         .map((section) => formatReviewSection(section))
+                         .join("")}
+                     </div>
+                   </div>
+                 `
+               )
+               .join("");
+
+             document.getElementById("historyContent").innerHTML = historyHtml;
+             document.getElementById("historyModal").classList.remove("hidden");
+           } catch (error) {
+             alert("Error fetching history: " + error.message);
+           }
+         });
+
+       // Close modals
+       document.querySelectorAll(".closeModal").forEach((button) => {
+         button.addEventListener("click", () => {
+           document.getElementById("metricsModal").classList.add("hidden");
+           document.getElementById("historyModal").classList.add("hidden");
+         });
+       });
+     </script>
+   </body>
+ </html>