|
|
|
|
|
|
|
""" |
|
Performance Analyzer Service |
|
|
|
This module provides regex-based detection of common performance anti-patterns in Python, JavaScript, TypeScript, Java, Go, and Rust source code.
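
Example usage (illustrative; the repository path and language list are placeholders):

    analyzer = PerformanceAnalyzer()
    report = analyzer.analyze_repository('/path/to/repo', ['Python', 'Go'])
    for hotspot in report['hotspots']:
        print(hotspot['file'], hotspot['issue_count'])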
|
""" |
|
|
|
import os |
|
import re |
|
import logging |
|
import subprocess |
|
import json |
|
import concurrent.futures |
|
from collections import defaultdict |
|
|
|
logger = logging.getLogger(__name__) |
|
|
|
class PerformanceAnalyzer: |
|
""" |
|
    Service that scans repository source files for common performance anti-patterns using per-language regular-expression rules.
|
""" |
|
|
|
def __init__(self): |
|
""" |
|
Initialize the PerformanceAnalyzer. |
|
""" |
|
logger.info("Initialized PerformanceAnalyzer") |
|
self.analyzers = { |
|
'Python': self._analyze_python_performance, |
|
'JavaScript': self._analyze_javascript_performance, |
|
'TypeScript': self._analyze_typescript_performance, |
|
'Java': self._analyze_java_performance, |
|
'Go': self._analyze_go_performance, |
|
'Rust': self._analyze_rust_performance, |
|
} |
|
|
|
|
|
self._init_performance_patterns() |
|
|
|
def _init_performance_patterns(self): |
|
""" |
|
Initialize performance patterns for different languages. |
|
""" |
|
|
|
self.python_patterns = [ |
|
{ |
|
'name': 'Inefficient list comprehension', |
|
'pattern': r'\[.*?for.*?in.*?for.*?in.*?\]', |
|
'severity': 'medium', |
|
'description': 'Nested list comprehensions can be inefficient for large datasets.', |
|
'suggestion': 'Consider using itertools or breaking into separate operations.', |
|
}, |
|
{ |
|
'name': 'String concatenation in loop', |
|
'pattern': r'for.*?\+\=\s*[\'\"](.*?)[\'\"]', |
|
'severity': 'medium', |
|
'description': 'String concatenation in loops is inefficient in Python.', |
|
                'suggestion': 'Collect the pieces in a list and join them with str.join() at the end.',
|
}, |
|
{ |
|
'name': 'Global variable in loop', |
|
'pattern': r'global\s+\w+.*?for\s+\w+\s+in', |
|
'severity': 'medium', |
|
'description': 'Modifying global variables in loops can be inefficient.', |
|
'suggestion': 'Use local variables and return values instead.', |
|
}, |
|
{ |
|
'name': 'Inefficient dict/list access in loop', |
|
'pattern': r'for.*?in.*?:\s*.*?\[.*?\]\s*=', |
|
'severity': 'medium', |
|
'description': 'Repeatedly accessing dictionary or list elements in a loop can be inefficient.', |
|
'suggestion': 'Consider using a local variable to store the accessed element.', |
|
}, |
|
{ |
|
'name': 'Using range(len())', |
|
'pattern': r'for\s+\w+\s+in\s+range\(len\(', |
|
'severity': 'low', |
|
'description': 'Using range(len()) is less readable than using enumerate().', |
|
'suggestion': 'Use enumerate() instead of range(len()).', |
|
}, |
|
{ |
|
'name': 'Inefficient regular expression', |
|
'pattern': r're\.compile\([\'\"].*?[\+\*].*?[\'\"]\)', |
|
'severity': 'medium', |
|
'description': 'Complex regular expressions can be inefficient.', |
|
'suggestion': 'Simplify the regular expression or use more specific patterns.', |
|
}, |
|
{ |
|
'name': 'Large memory allocation', |
|
'pattern': r'\[.*?for.*?in\s+range\(\d{7,}\)\]', |
|
'severity': 'high', |
|
'description': 'Creating large lists in memory can cause performance issues.', |
|
'suggestion': 'Use generators or iterators instead of creating large lists.', |
|
}, |
|
{ |
|
'name': 'Inefficient database query in loop', |
|
'pattern': r'for.*?in.*?:\s*.*?\.execute\(', |
|
'severity': 'high', |
|
'description': 'Executing database queries in a loop can be very inefficient.', |
|
'suggestion': 'Use batch operations or join queries instead of querying in a loop.', |
|
}, |
|
] |
|
|
|
|
|
self.javascript_patterns = [ |
|
{ |
|
'name': 'DOM manipulation in loop', |
|
'pattern': r'for\s*\(.*?\)\s*\{.*?document\..*?\}', |
|
'severity': 'high', |
|
'description': 'Manipulating the DOM inside loops can cause performance issues.', |
|
'suggestion': 'Batch DOM updates or use DocumentFragment.', |
|
}, |
|
{ |
|
'name': 'Inefficient array manipulation', |
|
'pattern': r'for\s*\(.*?\)\s*\{.*?splice\(.*?\}', |
|
'severity': 'medium', |
|
'description': 'Using splice() in loops can be inefficient for large arrays.', |
|
'suggestion': 'Consider using filter() or other array methods.', |
|
}, |
|
{ |
|
'name': 'Creating functions in loops', |
|
'pattern': r'for\s*\(.*?\)\s*\{.*?function\s*\(.*?\)\s*\{.*?\}.*?\}', |
|
'severity': 'medium', |
|
'description': 'Creating functions inside loops can lead to performance issues.', |
|
'suggestion': 'Define the function outside the loop and reference it.', |
|
}, |
|
{ |
|
'name': 'Inefficient string concatenation', |
|
'pattern': r'for\s*\(.*?\)\s*\{.*?\+\=\s*[\'\"](.*?)[\'\"].*?\}', |
|
'severity': 'medium', |
|
'description': 'String concatenation in loops can be inefficient.', |
|
'suggestion': 'Use array join() or template literals.', |
|
}, |
|
{ |
|
'name': 'Using eval()', |
|
'pattern': r'eval\(', |
|
'severity': 'high', |
|
'description': 'Using eval() is slow and can introduce security vulnerabilities.', |
|
'suggestion': 'Avoid using eval() and use safer alternatives.', |
|
}, |
|
{ |
|
'name': 'Inefficient event handling', |
|
'pattern': r'addEventListener\([\'\"].*?[\'\"],\s*function', |
|
'severity': 'medium', |
|
'description': 'Anonymous functions in event listeners can lead to memory leaks.', |
|
'suggestion': 'Use named functions for event handlers to allow proper cleanup.', |
|
}, |
|
] |
|
|
|
|
|
self.typescript_patterns = self.javascript_patterns + [ |
|
{ |
|
'name': 'Inefficient type assertion', |
|
'pattern': r'<.*?>\s*\(.*?\)', |
|
'severity': 'low', |
|
                'description': 'Type assertions are erased at runtime; excessive use usually signals typing problems and can slow type checking.',
|
'suggestion': 'Use proper typing and interfaces instead of frequent type assertions.', |
|
}, |
|
{ |
|
'name': 'Complex type definitions', |
|
'pattern': r'type\s+\w+\s*=\s*\{[^\}]{500,}\}', |
|
'severity': 'medium', |
|
'description': 'Overly complex type definitions can slow down the TypeScript compiler.', |
|
'suggestion': 'Break complex types into smaller, reusable interfaces.', |
|
}, |
|
] |
|
|
|
|
|
self.java_patterns = [ |
|
{ |
|
'name': 'Inefficient string concatenation', |
|
'pattern': r'for\s*\(.*?\)\s*\{.*?\+\=\s*[\'\"](.*?)[\'\"].*?\}', |
|
'severity': 'medium', |
|
'description': 'String concatenation in loops is inefficient in Java.', |
|
'suggestion': 'Use StringBuilder or StringBuffer instead.', |
|
}, |
|
{ |
|
'name': 'Creating objects in loops', |
|
'pattern': r'for\s*\(.*?\)\s*\{.*?new\s+\w+\(.*?\).*?\}', |
|
'severity': 'medium', |
|
'description': 'Creating objects inside loops can lead to excessive garbage collection.', |
|
'suggestion': 'Create objects outside the loop or use object pooling.', |
|
}, |
|
{ |
|
'name': 'Inefficient collection iteration', |
|
'pattern': r'for\s*\(int\s+i\s*=\s*0.*?i\s*<\s*\w+\.size\(\).*?\)', |
|
'severity': 'low', |
|
'description': 'Calling size() in each iteration can be inefficient for some collections.', |
|
'suggestion': 'Store the size in a variable before the loop.', |
|
}, |
|
{ |
|
'name': 'Using boxed primitives in performance-critical code', |
|
'pattern': r'(Integer|Boolean|Double|Float|Long)\s+\w+\s*=', |
|
'severity': 'low', |
|
'description': 'Using boxed primitives can be less efficient than primitive types.', |
|
'suggestion': 'Use primitive types (int, boolean, etc.) in performance-critical code.', |
|
}, |
|
{ |
|
'name': 'Inefficient exception handling', |
|
'pattern': r'try\s*\{.*?\}\s*catch\s*\(Exception\s+\w+\)\s*\{', |
|
'severity': 'medium', |
|
'description': 'Catching generic exceptions can hide issues and impact performance.', |
|
'suggestion': 'Catch specific exceptions and handle them appropriately.', |
|
}, |
|
] |
|
|
|
|
|
self.go_patterns = [ |
|
{ |
|
'name': 'Inefficient string concatenation', |
|
'pattern': r'for\s+.*?\{.*?\+\=\s*[\'\"](.*?)[\'\"].*?\}', |
|
'severity': 'medium', |
|
'description': 'String concatenation in loops can be inefficient.', |
|
'suggestion': 'Use strings.Builder for string concatenation in loops.', |
|
}, |
|
{ |
|
'name': 'Inefficient slice operations', |
|
'pattern': r'for\s+.*?\{.*?append\(.*?\}', |
|
'severity': 'medium', |
|
'description': 'Repeatedly appending to a slice can cause multiple allocations.', |
|
'suggestion': 'Pre-allocate slices with make() when the size is known.', |
|
}, |
|
{ |
|
'name': 'Mutex in hot path', |
|
'pattern': r'func\s+\(.*?\)\s+\w+\(.*?\)\s+\{.*?Lock\(\).*?Unlock\(\)', |
|
'severity': 'medium', |
|
'description': 'Using mutexes in frequently called functions can impact performance.', |
|
'suggestion': 'Consider using atomic operations or redesigning for less contention.', |
|
}, |
|
{ |
|
'name': 'Inefficient map iteration', |
|
'pattern': r'for\s+\w+,\s*_\s*:=\s*range', |
|
'severity': 'low', |
|
                'description': 'Ranging over a map with a discarded value variable (for k, _ := range) is redundant when only the keys are needed.',

                'suggestion': 'Omit the blank identifier and write "for key := range m" instead.',
|
}, |
|
] |
|
|
|
|
|
self.rust_patterns = [ |
|
{ |
|
'name': 'Inefficient string operations', |
|
'pattern': r'for\s+.*?\{.*?\.push_str\(.*?\}', |
|
'severity': 'medium', |
|
                'description': 'Growing a String inside a loop can trigger repeated reallocations.',

                'suggestion': 'Pre-allocate with String::with_capacity() when the final size is known.',
|
}, |
|
{ |
|
'name': 'Excessive cloning', |
|
'pattern': r'\.clone\(\)', |
|
'severity': 'medium', |
|
'description': 'Excessive cloning can impact performance.', |
|
'suggestion': 'Use references or ownership transfer where possible.', |
|
}, |
|
{ |
|
'name': 'Inefficient vector operations', |
|
'pattern': r'for\s+.*?\{.*?\.push\(.*?\}', |
|
'severity': 'medium', |
|
'description': 'Repeatedly pushing to vectors can cause multiple allocations.', |
|
'suggestion': 'Pre-allocate vectors with Vec::with_capacity() when the size is known.', |
|
}, |
|
{ |
|
'name': 'Box allocation in loops', |
|
'pattern': r'for\s+.*?\{.*?Box::new\(.*?\}', |
|
'severity': 'medium', |
|
'description': 'Allocating boxes in loops can be inefficient.', |
|
'suggestion': 'Allocate memory outside the loop when possible.', |
|
}, |
|
] |
|
|
|
def analyze_repository(self, repo_path, languages): |
|
""" |
|
Analyze code performance in a repository for the specified languages using parallel processing. |
|
|
|
Args: |
|
repo_path (str): The path to the repository. |
|
languages (list): A list of programming languages to analyze. |
|
|
|
Returns: |
|
dict: A dictionary containing performance analysis results for each language. |
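
        Example of the returned structure (illustrative; file names and counts are placeholders):

            {
                'language_results': {
                    'Python': {'status': 'success', 'issue_count': 3, 'issues': [...], ...},
                },
                'hotspots': [
                    {'file': 'src/app.py', 'issue_count': 3, 'issues': [...]},
                ],
            }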
|
""" |
|
logger.info(f"Analyzing performance in repository at {repo_path} for languages: {languages}") |
|
|
|
results = {} |
|
|
|
|
|
def analyze_language(language): |
|
if language in self.analyzers: |
|
try: |
|
logger.info(f"Analyzing {language} code performance in {repo_path}") |
|
return language, self.analyzers[language](repo_path) |
|
except Exception as e: |
|
logger.error(f"Error analyzing {language} code performance: {e}") |
|
return language, { |
|
'status': 'error', |
|
'error': str(e), |
|
'issues': [], |
|
} |
|
else: |
|
logger.warning(f"No performance analyzer available for {language}") |
|
return language, { |
|
'status': 'not_supported', |
|
'message': f"Performance analysis for {language} is not supported yet.", |
|
'issues': [], |
|
} |
|
|
|
|
|
        # ThreadPoolExecutor requires max_workers >= 1, so guard against an empty language list.
        with concurrent.futures.ThreadPoolExecutor(max_workers=max(1, min(len(languages), 5))) as executor:
|
|
|
future_to_language = {executor.submit(analyze_language, language): language for language in languages} |
|
|
|
|
|
for future in concurrent.futures.as_completed(future_to_language): |
|
language = future_to_language[future] |
|
try: |
|
lang, result = future.result() |
|
results[lang] = result |
|
logger.info(f"Completed performance analysis for {lang}") |
|
except Exception as e: |
|
logger.error(f"Exception occurred during performance analysis of {language}: {e}") |
|
results[language] = { |
|
'status': 'error', |
|
'error': str(e), |
|
'issues': [], |
|
} |
|
|
|
|
|
hotspots = self._identify_hotspots(results) |
|
|
|
return { |
|
'language_results': results, |
|
'hotspots': hotspots, |
|
} |
|
|
|
def _identify_hotspots(self, results): |
|
""" |
|
Identify performance hotspots across all languages. |
|
|
|
Args: |
|
results (dict): Performance analysis results for each language. |
|
|
|
Returns: |
|
            list: Up to ten files that contain two or more performance issues, sorted by descending issue count.
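
            Each hotspot entry has the form (illustrative file name):

                {'file': 'src/app.py', 'issue_count': 3, 'issues': [...]}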
|
""" |
|
|
|
file_issue_count = defaultdict(int) |
|
file_issues = defaultdict(list) |
|
|
|
for language, language_result in results.items(): |
|
for issue in language_result.get('issues', []): |
|
file_path = issue.get('file', '') |
|
if file_path: |
|
file_issue_count[file_path] += 1 |
|
file_issues[file_path].append(issue) |
|
|
|
|
|
hotspots = [] |
|
for file_path, count in sorted(file_issue_count.items(), key=lambda x: x[1], reverse=True): |
|
if count >= 2: |
|
hotspots.append({ |
|
'file': file_path, |
|
'issue_count': count, |
|
'issues': file_issues[file_path], |
|
}) |
|
|
|
return hotspots[:10] |
|
|
|
def _analyze_python_performance(self, repo_path): |
|
""" |
|
Analyze Python code for performance issues. |
|
|
|
Args: |
|
repo_path (str): The path to the repository. |
|
|
|
Returns: |
|
dict: Performance analysis results for Python code. |
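
        Each entry in the returned 'issues' list has the form (illustrative values):

            {'file': 'src/app.py', 'line': 42, 'code': '...',
             'issue': 'Using range(len())', 'description': '...',
             'suggestion': '...', 'severity': 'low', 'language': 'Python'}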
|
""" |
|
logger.info(f"Analyzing Python code performance in {repo_path}") |
|
|
|
|
|
python_files = [] |
|
for root, _, files in os.walk(repo_path): |
|
for file in files: |
|
if file.endswith('.py'): |
|
python_files.append(os.path.join(root, file)) |
|
|
|
if not python_files: |
|
return { |
|
'status': 'no_files', |
|
'message': 'No Python files found in the repository.', |
|
'issues': [], |
|
} |
|
|
|
|
|
issues = [] |
|
for file_path in python_files: |
|
try: |
|
with open(file_path, 'r', encoding='utf-8', errors='ignore') as f: |
|
content = f.read() |
|
|
|
|
|
for pattern in self.python_patterns: |
|
                    # re.DOTALL lets patterns that span multiple lines (e.g. loop bodies) match.
                    matches = re.finditer(pattern['pattern'], content, re.DOTALL)
|
for match in matches: |
|
line_number = content[:match.start()].count('\n') + 1 |
|
code_snippet = match.group(0) |
|
|
|
issues.append({ |
|
'file': file_path, |
|
'line': line_number, |
|
'code': code_snippet, |
|
'issue': pattern['name'], |
|
'description': pattern['description'], |
|
'suggestion': pattern['suggestion'], |
|
'severity': pattern['severity'], |
|
'language': 'Python', |
|
}) |
|
except Exception as e: |
|
logger.error(f"Error analyzing Python file {file_path}: {e}") |
|
|
|
|
|
issues_by_severity = defaultdict(list) |
|
for issue in issues: |
|
severity = issue.get('severity', 'unknown') |
|
issues_by_severity[severity].append(issue) |
|
|
|
return { |
|
'status': 'success', |
|
'issues': issues, |
|
'issues_by_severity': dict(issues_by_severity), |
|
'issue_count': len(issues), |
|
'files_analyzed': len(python_files), |
|
} |
|
|
|
def _analyze_javascript_performance(self, repo_path): |
|
""" |
|
Analyze JavaScript code for performance issues. |
|
|
|
Args: |
|
repo_path (str): The path to the repository. |
|
|
|
Returns: |
|
dict: Performance analysis results for JavaScript code. |
|
""" |
|
logger.info(f"Analyzing JavaScript code performance in {repo_path}") |
|
|
|
|
|
js_files = [] |
|
for root, _, files in os.walk(repo_path): |
|
if 'node_modules' in root: |
|
continue |
|
for file in files: |
|
if file.endswith(('.js', '.jsx')): |
|
js_files.append(os.path.join(root, file)) |
|
|
|
if not js_files: |
|
return { |
|
'status': 'no_files', |
|
'message': 'No JavaScript files found in the repository.', |
|
'issues': [], |
|
} |
|
|
|
|
|
issues = [] |
|
for file_path in js_files: |
|
try: |
|
with open(file_path, 'r', encoding='utf-8', errors='ignore') as f: |
|
content = f.read() |
|
|
|
|
|
for pattern in self.javascript_patterns: |
|
                    matches = re.finditer(pattern['pattern'], content, re.DOTALL)
|
for match in matches: |
|
line_number = content[:match.start()].count('\n') + 1 |
|
code_snippet = match.group(0) |
|
|
|
issues.append({ |
|
'file': file_path, |
|
'line': line_number, |
|
'code': code_snippet, |
|
'issue': pattern['name'], |
|
'description': pattern['description'], |
|
'suggestion': pattern['suggestion'], |
|
'severity': pattern['severity'], |
|
'language': 'JavaScript', |
|
}) |
|
except Exception as e: |
|
logger.error(f"Error analyzing JavaScript file {file_path}: {e}") |
|
|
|
|
|
issues_by_severity = defaultdict(list) |
|
for issue in issues: |
|
severity = issue.get('severity', 'unknown') |
|
issues_by_severity[severity].append(issue) |
|
|
|
return { |
|
'status': 'success', |
|
'issues': issues, |
|
'issues_by_severity': dict(issues_by_severity), |
|
'issue_count': len(issues), |
|
'files_analyzed': len(js_files), |
|
} |
|
|
|
def _analyze_typescript_performance(self, repo_path): |
|
""" |
|
Analyze TypeScript code for performance issues. |
|
|
|
Args: |
|
repo_path (str): The path to the repository. |
|
|
|
Returns: |
|
dict: Performance analysis results for TypeScript code. |
|
""" |
|
logger.info(f"Analyzing TypeScript code performance in {repo_path}") |
|
|
|
|
|
ts_files = [] |
|
for root, _, files in os.walk(repo_path): |
|
if 'node_modules' in root: |
|
continue |
|
for file in files: |
|
if file.endswith(('.ts', '.tsx')): |
|
ts_files.append(os.path.join(root, file)) |
|
|
|
if not ts_files: |
|
return { |
|
'status': 'no_files', |
|
'message': 'No TypeScript files found in the repository.', |
|
'issues': [], |
|
} |
|
|
|
|
|
issues = [] |
|
for file_path in ts_files: |
|
try: |
|
with open(file_path, 'r', encoding='utf-8', errors='ignore') as f: |
|
content = f.read() |
|
|
|
|
|
for pattern in self.typescript_patterns: |
|
                    matches = re.finditer(pattern['pattern'], content, re.DOTALL)
|
for match in matches: |
|
line_number = content[:match.start()].count('\n') + 1 |
|
code_snippet = match.group(0) |
|
|
|
issues.append({ |
|
'file': file_path, |
|
'line': line_number, |
|
'code': code_snippet, |
|
'issue': pattern['name'], |
|
'description': pattern['description'], |
|
'suggestion': pattern['suggestion'], |
|
'severity': pattern['severity'], |
|
'language': 'TypeScript', |
|
}) |
|
except Exception as e: |
|
logger.error(f"Error analyzing TypeScript file {file_path}: {e}") |
|
|
|
|
|
issues_by_severity = defaultdict(list) |
|
for issue in issues: |
|
severity = issue.get('severity', 'unknown') |
|
issues_by_severity[severity].append(issue) |
|
|
|
return { |
|
'status': 'success', |
|
'issues': issues, |
|
'issues_by_severity': dict(issues_by_severity), |
|
'issue_count': len(issues), |
|
'files_analyzed': len(ts_files), |
|
} |
|
|
|
def _analyze_java_performance(self, repo_path): |
|
""" |
|
Analyze Java code for performance issues. |
|
|
|
Args: |
|
repo_path (str): The path to the repository. |
|
|
|
Returns: |
|
dict: Performance analysis results for Java code. |
|
""" |
|
logger.info(f"Analyzing Java code performance in {repo_path}") |
|
|
|
|
|
java_files = [] |
|
for root, _, files in os.walk(repo_path): |
|
for file in files: |
|
if file.endswith('.java'): |
|
java_files.append(os.path.join(root, file)) |
|
|
|
if not java_files: |
|
return { |
|
'status': 'no_files', |
|
'message': 'No Java files found in the repository.', |
|
'issues': [], |
|
} |
|
|
|
|
|
issues = [] |
|
for file_path in java_files: |
|
try: |
|
with open(file_path, 'r', encoding='utf-8', errors='ignore') as f: |
|
content = f.read() |
|
|
|
|
|
for pattern in self.java_patterns: |
|
                    matches = re.finditer(pattern['pattern'], content, re.DOTALL)
|
for match in matches: |
|
line_number = content[:match.start()].count('\n') + 1 |
|
code_snippet = match.group(0) |
|
|
|
issues.append({ |
|
'file': file_path, |
|
'line': line_number, |
|
'code': code_snippet, |
|
'issue': pattern['name'], |
|
'description': pattern['description'], |
|
'suggestion': pattern['suggestion'], |
|
'severity': pattern['severity'], |
|
'language': 'Java', |
|
}) |
|
except Exception as e: |
|
logger.error(f"Error analyzing Java file {file_path}: {e}") |
|
|
|
|
|
issues_by_severity = defaultdict(list) |
|
for issue in issues: |
|
severity = issue.get('severity', 'unknown') |
|
issues_by_severity[severity].append(issue) |
|
|
|
return { |
|
'status': 'success', |
|
'issues': issues, |
|
'issues_by_severity': dict(issues_by_severity), |
|
'issue_count': len(issues), |
|
'files_analyzed': len(java_files), |
|
} |
|
|
|
def _analyze_go_performance(self, repo_path): |
|
""" |
|
Analyze Go code for performance issues. |
|
|
|
Args: |
|
repo_path (str): The path to the repository. |
|
|
|
Returns: |
|
dict: Performance analysis results for Go code. |
|
""" |
|
logger.info(f"Analyzing Go code performance in {repo_path}") |
|
|
|
|
|
go_files = [] |
|
for root, _, files in os.walk(repo_path): |
|
for file in files: |
|
if file.endswith('.go'): |
|
go_files.append(os.path.join(root, file)) |
|
|
|
if not go_files: |
|
return { |
|
'status': 'no_files', |
|
'message': 'No Go files found in the repository.', |
|
'issues': [], |
|
} |
|
|
|
|
|
issues = [] |
|
for file_path in go_files: |
|
try: |
|
with open(file_path, 'r', encoding='utf-8', errors='ignore') as f: |
|
content = f.read() |
|
|
|
|
|
for pattern in self.go_patterns: |
|
                    matches = re.finditer(pattern['pattern'], content, re.DOTALL)
|
for match in matches: |
|
line_number = content[:match.start()].count('\n') + 1 |
|
code_snippet = match.group(0) |
|
|
|
issues.append({ |
|
'file': file_path, |
|
'line': line_number, |
|
'code': code_snippet, |
|
'issue': pattern['name'], |
|
'description': pattern['description'], |
|
'suggestion': pattern['suggestion'], |
|
'severity': pattern['severity'], |
|
'language': 'Go', |
|
}) |
|
except Exception as e: |
|
logger.error(f"Error analyzing Go file {file_path}: {e}") |
|
|
|
|
|
issues_by_severity = defaultdict(list) |
|
for issue in issues: |
|
severity = issue.get('severity', 'unknown') |
|
issues_by_severity[severity].append(issue) |
|
|
|
return { |
|
'status': 'success', |
|
'issues': issues, |
|
'issues_by_severity': dict(issues_by_severity), |
|
'issue_count': len(issues), |
|
'files_analyzed': len(go_files), |
|
} |
|
|
|
def _analyze_rust_performance(self, repo_path): |
|
""" |
|
Analyze Rust code for performance issues. |
|
|
|
Args: |
|
repo_path (str): The path to the repository. |
|
|
|
Returns: |
|
dict: Performance analysis results for Rust code. |
|
""" |
|
logger.info(f"Analyzing Rust code performance in {repo_path}") |
|
|
|
|
|
rust_files = [] |
|
for root, _, files in os.walk(repo_path): |
|
for file in files: |
|
if file.endswith('.rs'): |
|
rust_files.append(os.path.join(root, file)) |
|
|
|
if not rust_files: |
|
return { |
|
'status': 'no_files', |
|
'message': 'No Rust files found in the repository.', |
|
'issues': [], |
|
} |
|
|
|
|
|
issues = [] |
|
for file_path in rust_files: |
|
try: |
|
with open(file_path, 'r', encoding='utf-8', errors='ignore') as f: |
|
content = f.read() |
|
|
|
|
|
for pattern in self.rust_patterns: |
|
                    matches = re.finditer(pattern['pattern'], content, re.DOTALL)
|
for match in matches: |
|
line_number = content[:match.start()].count('\n') + 1 |
|
code_snippet = match.group(0) |
|
|
|
issues.append({ |
|
'file': file_path, |
|
'line': line_number, |
|
'code': code_snippet, |
|
'issue': pattern['name'], |
|
'description': pattern['description'], |
|
'suggestion': pattern['suggestion'], |
|
'severity': pattern['severity'], |
|
'language': 'Rust', |
|
}) |
|
except Exception as e: |
|
logger.error(f"Error analyzing Rust file {file_path}: {e}") |
|
|
|
|
|
issues_by_severity = defaultdict(list) |
|
for issue in issues: |
|
severity = issue.get('severity', 'unknown') |
|
issues_by_severity[severity].append(issue) |
|
|
|
return { |
|
'status': 'success', |
|
'issues': issues, |
|
'issues_by_severity': dict(issues_by_severity), |
|
'issue_count': len(issues), |
|
'files_analyzed': len(rust_files), |
|
} |