Commit 1de1c4f (parent: 3e82f96), committed by Joash

Simplify app.py and update configuration for ZeroGPU
README.md
CHANGED
@@ -4,9 +4,11 @@ emoji: 🤖
 colorFrom: blue
 colorTo: green
 sdk: gradio
-sdk_version:
+sdk_version: 4.0.0
 app_file: app.py
 pinned: false
+hf_oauth: false
+hardware: a10g-small
 ---
 
 # Code Review Assistant
@@ -23,8 +25,15 @@ An automated code review system powered by Gemma-2b that provides intelligent co
 
 ### LLMOps Integration
 - Uses Gemma-2b for intelligent code analysis
--
--
+- Tracks model performance and accuracy
+- Monitors response times and token usage
+- Optimized with ZeroGPU for efficient inference
+
+### Performance Monitoring
+- Real-time metrics dashboard
+- Review history tracking
+- Response time monitoring
+- Usage statistics
 
 ### User Interface
 - Simple and intuitive Gradio interface
@@ -39,23 +48,24 @@ The following environment variables need to be set in your Hugging Face Space:
 - `HUGGING_FACE_TOKEN`: Your Hugging Face API token (required)
 - `MODEL_NAME`: google/gemma-2b-it (default)
 
+## Hardware Configuration
+
+This Space uses:
+- Runtime: ZeroGPU
+- Hardware: A10G Small
+- Memory: Optimized for efficient model inference
+
 ## Usage
 
 1. Enter your code in the text box
 2. Select the programming language from the dropdown
-3. Click "Submit
+3. Click "Submit for Review"
 4. View the detailed analysis including:
    - Critical issues
    - Suggested improvements
    - Best practices
    - Security considerations
 
-## Example Code
-
-Try the included example code snippets to see how the review system works:
-- Python function example
-- JavaScript array processing example
-
 ## Model Details
 
 This application uses the Gemma-2b-it model from Google, which is:
@@ -66,4 +76,4 @@ This application uses the Gemma-2b-it model from Google, which is:
 
 ## License
 
-This project is licensed under the MIT License.
+This project is licensed under the MIT License.
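The README now advertises a ZeroGPU runtime on A10G Small hardware. For reference, a minimal sketch of how a Gradio Space typically requests ZeroGPU, assuming the `spaces` package that ZeroGPU Spaces provide; the function body, labels, and choices below are illustrative and are not taken from this repository's app.py.

# Sketch: ZeroGPU attaches a GPU only while a function decorated with
# @spaces.GPU is executing. Names are illustrative placeholders.
import spaces
import gradio as gr

@spaces.GPU
def review_code(code: str, language: str) -> str:
    # Model inference would run here while the GPU is attached.
    return f"Review for {language} snippet ({len(code)} characters)"

demo = gr.Interface(
    fn=review_code,
    inputs=[gr.Textbox(label="Code"), gr.Dropdown(["Python", "JavaScript"], label="Language")],
    outputs=gr.Textbox(label="Review"),
)

if __name__ == "__main__":
    demo.launch()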
app.py
CHANGED
@@ -7,9 +7,6 @@ import logging
 from datetime import datetime
 import json
 from typing import List, Dict
-from huggingface_hub import HfApi
-from huggingface_hub.spaces import SpaceHardware, SpaceStage
-from huggingface_hub.spaces.space_sdk import SpaceRuntime
 
 # Configure logging
 logging.basicConfig(level=logging.INFO)
@@ -19,16 +16,6 @@ logger = logging.getLogger(__name__)
 HF_TOKEN = os.getenv("HUGGING_FACE_TOKEN")
 MODEL_NAME = os.getenv("MODEL_NAME", "google/gemma-2b-it")
 
-# Initialize Hugging Face API
-api = HfApi()
-
-# Space hardware configuration
-space_config = {
-    "hardware": SpaceHardware.A10G_SMALL,
-    "stage": SpaceStage.RUNTIME,
-    "runtime": SpaceRuntime.ZEROGPU
-}
-
 class Review:
     def __init__(self, code: str, language: str, suggestions: str):
         self.code = code
@@ -41,7 +28,7 @@ class CodeReviewer:
     def __init__(self):
         self.model = None
         self.tokenizer = None
-        self.device =
+        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
         self.review_history: List[Review] = []
         self.metrics = {
             'total_reviews': 0,
@@ -64,17 +51,14 @@ class CodeReviewer:
             )
 
             logger.info("Loading model...")
-            # Initialize with ZeroGPU configuration
             self.model = AutoModelForCausalLM.from_pretrained(
                 MODEL_NAME,
                 token=HF_TOKEN,
                 device_map="auto",
                 torch_dtype=torch.float16,
                 trust_remote_code=True,
-                low_cpu_mem_usage=True,
-                use_zerogpu=True  # Enable ZeroGPU
+                low_cpu_mem_usage=True
             )
-            self.device = next(self.model.parameters()).device
             logger.info(f"Model loaded successfully on {self.device}")
         except Exception as e:
             logger.error(f"Error initializing model: {e}")
@@ -105,7 +89,7 @@ Code:
                 truncation=True,
                 max_length=512,
                 padding=True
-            ).to(self.device)
+            ).to(self.device)
 
             with torch.no_grad():
                 outputs = self.model.generate(
@@ -179,12 +163,8 @@ Code:
             'Device': str(self.device)
         }
 
-# Initialize reviewer
-
-def create_reviewer():
-    return CodeReviewer()
-
-reviewer = create_reviewer()
+# Initialize reviewer
+reviewer = CodeReviewer()
 
 # Create Gradio interface
 with gr.Blocks(theme=gr.themes.Soft()) as iface: