Update app.py
app.py CHANGED
@@ -5,13 +5,13 @@ from transformers import AutoTokenizer, AutoModelForCausalLM
 import json
 import os
 
-# Load the model and tokenizer from Hugging Face
-model_name = "
+# Load the CodeGen-2B-mono model and tokenizer from Hugging Face
+model_name = "Salesforce/codegen-2B-mono"  # Best version for CPU-friendly performance in code generation
 tokenizer = AutoTokenizer.from_pretrained(model_name)
 model = AutoModelForCausalLM.from_pretrained(model_name)
 
-# Ensure the model runs on CPU for Hugging Face Spaces free tier
-device = torch.device("
+# Ensure the model runs on CPU (important for Hugging Face Spaces free tier)
+device = torch.device("cpu")
 model.to(device)
 
 # Cache to store recent prompts and responses with file-based persistence
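Note: the hunk context shows the `transformers` import, and the changed lines call `torch.device`, so an `import torch` presumably sits in the unshown lines above this hunk. A minimal sketch of how the updated block reads after this commit, assuming that import:

import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

model_name = "Salesforce/codegen-2B-mono"
tokenizer = AutoTokenizer.from_pretrained(model_name)     # downloads tokenizer files on first run
model = AutoModelForCausalLM.from_pretrained(model_name)  # ~2B parameters, loaded in fp32 by default

device = torch.device("cpu")  # pin to CPU for the Spaces free tier
model.to(device)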
@@ -42,11 +42,11 @@ def code_assistant(prompt, language):
     # Tokenize the input
     inputs = tokenizer(prompt, return_tensors="pt").to(device)
 
-    # Generate response with adjusted parameters for faster
+    # Generate response with adjusted parameters for faster CPU response
     outputs = model.generate(
         inputs.input_ids,
         max_length=128,  # Shortened max length for quicker response
-        temperature=0.1,  # Lower temperature for
+        temperature=0.1,  # Lower temperature for focused output
         top_p=0.8,  # Slightly reduced top_p for quicker sampling
         do_sample=True
     )
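Note: `max_length=128` caps prompt and completion tokens combined, so a long prompt leaves little room for generated code; `max_new_tokens` bounds only the completion. Also, `temperature=0.1` with `do_sample=True` is close to greedy decoding. The function presumably decodes `outputs` just after this hunk; a minimal sketch, assuming the standard `transformers` call:

# Decode the first returned sequence back to text, dropping special tokens
response = tokenizer.decode(outputs[0], skip_special_tokens=True)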
@@ -73,7 +73,7 @@ iface = gr.Interface(
         gr.Dropdown(choices=["Python", "JavaScript", "Java", "C++", "HTML", "CSS", "SQL", "Other"], label="Programming Language")
     ],
     outputs="text",
-    title="
+    title="Code Assistant with CodeGen-2B",
     description="An AI code assistant to help you with coding queries, debugging, and code generation. Specify the programming language for more accurate responses."
 )
 
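Note: Hugging Face Spaces runs `app.py` top to bottom, so a launch call presumably follows below the lines shown in this diff; a minimal sketch of the usual closing line:

iface.launch()  # assumed; the launch call is not visible in this hunk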