Update app.py
app.py CHANGED
@@ -384,10 +384,10 @@ def extract_functions(gemini_output):
 
 def validate_and_generate_documentation(api_url, headers, gemini_output, file_contents, functionality_description):
     """
-    Uses the Hugging Face Inference API to generate
+    Uses the Hugging Face Inference API to generate documentation in chunks to avoid token limits.
     """
     # Generate the refined prompt for the Qwen model
-    prompt = f"""
+    base_prompt = f"""
     User-specified functionality: '{functionality_description}'
     Functions identified by Gemini:
     {gemini_output}

@@ -419,43 +419,43 @@ def validate_and_generate_documentation(api_url, headers, gemini_output, file_contents, functionality_description):
     - Example Usage: <Example demonstrating how to use the function>
     """
 
-    # Call the Hugging Face API
-    payload = {"inputs": prompt, "parameters": {"max_new_tokens": 2048}}
-    response = requests.post(api_url, headers=headers, json=payload)
+    # Split file contents into chunks to avoid exceeding the token limit
+    max_chunk_size = 12000  # Adjust based on the tokenization overhead
+    file_chunks = []
+    current_chunk = base_prompt
 
-    if response.status_code == 200:
-        api_response = response.json()
-        full_output = ""
+    for file_path, content in file_contents.items():
+        chunk_content = f"File: {os.path.basename(file_path)}\n{content}\n\n"
+        if len(current_chunk) + len(chunk_content) > max_chunk_size:
+            file_chunks.append(current_chunk)
+            current_chunk = base_prompt + chunk_content
+        else:
+            current_chunk += chunk_content
+
+    # Add the last chunk
+    if current_chunk not in file_chunks:
+        file_chunks.append(current_chunk)
 
-        while True:
-
+    # Process each chunk through the API
+    full_output = ""
+    for chunk in file_chunks:
+        payload = {"inputs": chunk, "parameters": {"max_new_tokens": 1024}}
+        response = requests.post(api_url, headers=headers, json=payload)
+
+        if response.status_code == 200:
+            api_response = response.json()
             if isinstance(api_response, list):
                 output = api_response[0].get("generated_text", "")
             elif isinstance(api_response, dict):
                 output = api_response.get("generated_text", "")
             else:
-                raise ValueError("Unexpected response format.")
-
+                raise ValueError("Unexpected response format from Hugging Face API.")
             full_output += output
+        else:
+            raise ValueError(f"Error during API call: {response.status_code}, {response.text}")
 
-
-            if "end_of_text" in output or len(output.strip()) == 0:
-                break
-
-            # Prepare the payload for continuation
-            payload = {"inputs": full_output, "parameters": {"max_new_tokens": 2048}}
-            response = requests.post(api_url, headers=headers, json=payload)
-
-            if response.status_code == 200:
-                api_response = response.json()
-            else:
-                raise ValueError(f"Error during continuation: {response.status_code}, {response.text}")
+    return full_output
 
-        # Clean the output to exclude prompts
-        cleaned_output = clean_output(full_output)
-        return cleaned_output
-    else:
-        raise ValueError(f"Error: {response.status_code}, {response.text}")
 
 def generate_documentation_page():
     st.subheader(f"Generate Documentation for {st.session_state.current_project}")