Spaces: Running on Zero
Update app.py
app.py CHANGED
@@ -391,6 +391,55 @@ model = AutoModelForCausalLM.from_pretrained(
 
 import gradio as gr
 import torch
+def generate_from_prompt(prompt):
+    try:
+        messages = [{"role": "user", "content": prompt}]
+        input_ids = tokenizer.apply_chat_template(
+            messages,
+            tokenize=True,
+            return_tensors="pt",
+            add_generation_prompt=True,
+        ).to(model.device)
+
+        with torch.no_grad():
+            outputs = model.generate(
+                input_ids,
+                max_new_tokens=4096,
+                pad_token_id=tokenizer.eos_token_id,
+                eos_token_id=tokenizer.eos_token_id,
+                do_sample=True,
+                temperature=0.7,
+                top_p=0.95,
+            )
+
+        response = tokenizer.decode(outputs[0][input_ids.shape[-1]:], skip_special_tokens=True)
+        return response.strip()
+
+    except Exception as e:
+        return f"Error during generation: {e}"
+def generate_model_from_description(description):
+    if not description.strip():
+        return "Error: Description is required."
+
+    prompt = f"""
+You are an expert C# developer.
+
+Generate a clean, professional C# entity model class based on the following project description:
+
+{description}
+
+Please output only the C# class code.
+"""
+
+    code = generate_from_prompt(prompt)
+
+    # Strip Markdown code-fence markers from the output if present
+    if code.startswith("```csharp"):
+        code = code[len("```csharp"):].strip()
+    if code.endswith("```"):
+        code = code[:-len("```")].strip()
+
+    return code
 
 # Presumably you already have this function as you wrote it
 @spaces.GPU
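The two helpers added in this hunk rely only on the `tokenizer` and `model` objects created earlier in app.py (the hunk context shows `model = AutoModelForCausalLM.from_pretrained(` just above). A minimal usage sketch, assuming those globals and the two new functions are already in scope; the description string here is invented for illustration:

```python
# Hypothetical usage of the helpers added above; `tokenizer`, `model`, and the
# two new functions are assumed to already be defined in app.py.
description = "A bookstore system with Book, Author, and Order entities."

csharp_code = generate_model_from_description(description)
print(csharp_code)  # the generated C# entity class, or an error message string
```

Note that the fence stripping only removes an exact leading "```csharp" marker; output wrapped in a bare "```" fence would pass through unchanged.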
@@ -491,9 +540,27 @@ with gr.Blocks(title="C# Validator Generator with 8 Tabs") as demo:
         inputs=[model_names_input, shared_model_structure_input, shared_template_input],
         outputs=[batch_code_output],
     )
+    with gr.Tab("Generate Model from Description"):
+        with gr.Row():
+            with gr.Column():
+                description_input = gr.Textbox(
+                    label="Project Description",
+                    lines=10,
+                    placeholder="Enter your project or model description here..."
+                )
+                generate_model_desc_btn = gr.Button("Generate Model Code")
+
+            with gr.Column():
+                model_code_output_desc = gr.Code(label="Generated Model Code", lines=25)
+
+        generate_model_desc_btn.click(
+            fn=generate_model_from_description,
+            inputs=[description_input],
+            outputs=[model_code_output_desc]
+        )
 
     # Tab 3 to Tab 8 placeholders for future features
-    for i in range(
+    for i in range(4, 9):
         with gr.Tab(f"Tab {i} - Placeholder"):
             gr.Markdown(f"Content for Tab {i} goes here...")
 
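The UI side of the change follows the standard Gradio pattern: a `gr.Tab` containing a `gr.Row` with input and output columns, and a `gr.Button` bound to the handler via `.click`. A self-contained sketch of the same tab with the model call stubbed out, so the wiring can be tried without a GPU or model download; the stub function and its return value are invented for illustration:

```python
import gradio as gr

def fake_generate_model_from_description(description: str) -> str:
    # Stand-in for the real generator so the tab's wiring can be exercised
    # without loading a model; returns a trivial C# class.
    if not description.strip():
        return "Error: Description is required."
    return "public class Example\n{\n    public int Id { get; set; }\n}"

with gr.Blocks(title="Generate Model from Description (demo)") as demo:
    with gr.Tab("Generate Model from Description"):
        with gr.Row():
            with gr.Column():
                description_input = gr.Textbox(
                    label="Project Description",
                    lines=10,
                    placeholder="Enter your project or model description here..."
                )
                generate_model_desc_btn = gr.Button("Generate Model Code")

            with gr.Column():
                model_code_output_desc = gr.Code(label="Generated Model Code", lines=25)

        generate_model_desc_btn.click(
            fn=fake_generate_model_from_description,
            inputs=[description_input],
            outputs=[model_code_output_desc],
        )

demo.launch()
```

In the actual app the stub is replaced by `generate_model_from_description` from the first hunk, and the placeholder loop now covers Tabs 4 through 8.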