Arnesh27 committed on
Commit
0357904
·
verified ·
1 Parent(s): c15417b

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +7 -7
app.py CHANGED
@@ -16,21 +16,21 @@ device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
16
  model.to(device)
17
 
18
  def generate_code(prompt):
19
- # Add context to the prompt to clarify the output
20
- full_prompt = f"Generate a basic HTML template for a personal blog. {prompt}"
21
 
22
- # Tokenize the input and set pad token
23
  input_tensor = tokenizer(full_prompt, return_tensors="pt", padding=True, truncation=True).to(device)
24
 
25
  # Generate code with attention mask
26
  with torch.no_grad():
27
  generated_ids = model.generate(
28
  input_tensor['input_ids'],
29
- attention_mask=input_tensor['attention_mask'], # Include attention mask
30
- max_length=300, # Adjust this length as needed
31
- num_beams=5, # This controls the diversity of outputs
32
  early_stopping=True,
33
- pad_token_id=tokenizer.pad_token_id # Set pad token id
34
  )
35
 
36
  # Decode and return the generated code
 
16
  model.to(device)
17
 
18
  def generate_code(prompt):
19
+ # Refined prompt
20
+ full_prompt = f"Please generate a basic HTML template for a personal blog. The output should be only the HTML code."
21
 
22
+ # Tokenize the input
23
  input_tensor = tokenizer(full_prompt, return_tensors="pt", padding=True, truncation=True).to(device)
24
 
25
  # Generate code with attention mask
26
  with torch.no_grad():
27
  generated_ids = model.generate(
28
  input_tensor['input_ids'],
29
+ attention_mask=input_tensor['attention_mask'],
30
+ max_length=500, # Increase if necessary to capture full HTML
31
+ num_beams=5,
32
  early_stopping=True,
33
+ pad_token_id=tokenizer.pad_token_id
34
  )
35
 
36
  # Decode and return the generated code