Spaces:
Sleeping
Sleeping
Gabriel Okiri
committed on
Commit
·
4122a88
1
Parent(s):
f2f5107
test
Browse files
app.py
CHANGED
@@ -14,15 +14,15 @@ tokenizer = AutoTokenizer.from_pretrained(tokenizer_name, trust_remote_code=True
|
|
14 |
device = "cuda" if torch.cuda.is_available() else "cpu"
|
15 |
model.to(device)
|
16 |
|
17 |
-
# Define generation configuration
|
18 |
generation_config = GenerationConfig(
|
19 |
-
max_length=
|
20 |
max_new_tokens=50, # Ensure sufficient tokens for your translations
|
21 |
num_beams=5, # Moderate number of beams for a balance between speed and quality
|
22 |
do_sample=False, # Disable sampling to make output deterministic
|
23 |
temperature=1.0, # Neutral temperature since sampling is off
|
24 |
-
top_k=
|
25 |
-
top_p=
|
26 |
repetition_penalty=4.0, # Neutral repetition penalty for translation
|
27 |
length_penalty=3.0, # No penalty for sequence length; modify if your translations tend to be too short/long
|
28 |
early_stopping=True # Stop early when all beams finish to speed up generation
|
@@ -74,4 +74,4 @@ iface = gr.Interface(
|
|
74 |
)
|
75 |
|
76 |
if __name__ == "__main__":
|
77 |
-
iface.launch()
|
|
|
14 |
device = "cuda" if torch.cuda.is_available() else "cpu"
|
15 |
model.to(device)
|
16 |
|
17 |
+
# Define generation configuration with a maximum length
|
18 |
generation_config = GenerationConfig(
|
19 |
+
max_length=150, # Set a maximum length for output
|
20 |
max_new_tokens=50, # Ensure sufficient tokens for your translations
|
21 |
num_beams=5, # Moderate number of beams for a balance between speed and quality
|
22 |
do_sample=False, # Disable sampling to make output deterministic
|
23 |
temperature=1.0, # Neutral temperature since sampling is off
|
24 |
+
top_k=None, # Set to None for deterministic generation
|
25 |
+
top_p=None, # Set to None for deterministic generation
|
26 |
repetition_penalty=4.0, # Neutral repetition penalty for translation
|
27 |
length_penalty=3.0, # No penalty for sequence length; modify if your translations tend to be too short/long
|
28 |
early_stopping=True # Stop early when all beams finish to speed up generation
|
|
|
74 |
)
|
75 |
|
76 |
if __name__ == "__main__":
|
77 |
+
iface.launch(share=True) # Set share=True to create a public link
|