Update app.py
app.py CHANGED
@@ -46,27 +46,23 @@ Image: [IMAGE]
 Description: """
 
     # Tokenize input
-    inputs = tokenizer(input_text, return_tensors="pt")
+    inputs = tokenizer(input_text, return_tensors="pt", padding=True, truncation=True)
 
     # Generate response
     with torch.no_grad():
         outputs = model.generate(
-
-
-            max_new_tokens=max_length,
+            **inputs,
+            max_length=max_length,
             temperature=temperature,
             top_p=top_p,
             do_sample=True,
+            num_return_sequences=1,
             pad_token_id=tokenizer.pad_token_id,
-
-            eos_token_id=tokenizer.eos_token_id,
-            use_cache=True,  # Enable caching
-            return_dict_in_generate=True,  # Return as dict
-            output_scores=True  # Get scores
+            eos_token_id=tokenizer.eos_token_id
         )
 
     # Decode and return the response
-    generated_text = tokenizer.decode(outputs
+    generated_text = tokenizer.decode(outputs[0], skip_special_tokens=True)
     return generated_text.split("Description: ")[-1].strip()
 
 def create_demo(model_id):
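
For reference, a sketch of the patched helper as it might read after this commit. The function name generate_description and its signature are assumptions, since the hunk shows only the body and none of the prompt construction above line 46; the body itself follows the new side of the diff. One behavioral note: in transformers, max_length caps prompt plus generated tokens, while the replaced max_new_tokens capped only the newly generated ones, so long prompts now leave less room for output. Dropping return_dict_in_generate=True also means generate() returns a plain tensor of token ids, which is why outputs[0] can be decoded directly.

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

def generate_description(model, tokenizer, input_text,
                         max_length=256, temperature=0.7, top_p=0.9):
    # Tokenize input (padding/truncation were added by this commit)
    inputs = tokenizer(input_text, return_tensors="pt", padding=True, truncation=True)

    # Generate response
    with torch.no_grad():
        outputs = model.generate(
            **inputs,
            max_length=max_length,
            temperature=temperature,
            top_p=top_p,
            do_sample=True,
            num_return_sequences=1,
            pad_token_id=tokenizer.pad_token_id,
            eos_token_id=tokenizer.eos_token_id,
        )

    # generate() now returns a plain tensor of token ids, so outputs[0]
    # is the first (and only) generated sequence
    generated_text = tokenizer.decode(outputs[0], skip_special_tokens=True)
    return generated_text.split("Description: ")[-1].strip()

A quick way to exercise it, where the "gpt2" checkpoint and the prompt string are placeholders rather than the Space's actual model and template:

tokenizer = AutoTokenizer.from_pretrained("gpt2")  # placeholder model
model = AutoModelForCausalLM.from_pretrained("gpt2")
if tokenizer.pad_token_id is None:
    tokenizer.pad_token = tokenizer.eos_token  # gpt2 ships without a pad token
print(generate_description(model, tokenizer, "Image: [IMAGE]\nDescription: "))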