jatingocodeo committed on
Commit
44302df
·
verified ·
1 Parent(s): 1a1fb0e

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +19 -12
app.py CHANGED
@@ -53,7 +53,10 @@ def initialize():
53
  def generate_text(prompt, max_length=100, temperature=0.7, top_k=50):
54
  # Initialize if not already done
55
  if MODEL is None:
56
- initialize()
 
 
 
57
 
58
  try:
59
  # Process prompt
@@ -73,12 +76,12 @@ def generate_text(prompt, max_length=100, temperature=0.7, top_k=50):
73
  outputs = MODEL.generate(
74
  input_ids,
75
  max_length=min(max_length + len(input_ids[0]), 2048),
76
- temperature=temperature,
77
- top_k=top_k,
78
- do_sample=True,
 
79
  pad_token_id=TOKENIZER.pad_token_id,
80
  eos_token_id=TOKENIZER.eos_token_id,
81
- num_return_sequences=1
82
  )
83
 
84
  # Decode and return
@@ -86,8 +89,9 @@ def generate_text(prompt, max_length=100, temperature=0.7, top_k=50):
86
  return generated_text.strip()
87
 
88
  except Exception as e:
89
- print(f"Error generating text: {str(e)}")
90
- return f"An error occurred: {str(e)}"
 
91
 
92
  # Create Gradio interface
93
  iface = gr.Interface(
@@ -100,17 +104,20 @@ iface = gr.Interface(
100
  ],
101
  outputs=gr.Textbox(label="Generated Text", lines=5),
102
  title="SmolLM2 Text Generator",
103
- description="""Generate text using the fine-tuned SmolLM2 model.
104
- - Max Length: Controls the length of generated text
105
- - Temperature: Controls randomness (higher = more creative)
106
- - Top K: Controls diversity of word choices""",
107
  examples=[
108
  ["Once upon a time", 100, 0.7, 50],
109
  ["The quick brown fox", 150, 0.8, 40],
110
- ["In a galaxy far far away", 200, 0.9, 30],
111
  ],
112
  allow_flagging="never"
113
  )
114
 
 
 
 
 
 
 
 
115
  if __name__ == "__main__":
116
  iface.launch()
 
53
  def generate_text(prompt, max_length=100, temperature=0.7, top_k=50):
54
  # Initialize if not already done
55
  if MODEL is None:
56
+ try:
57
+ initialize()
58
+ except Exception as e:
59
+ return f"Failed to initialize model: {str(e)}"
60
 
61
  try:
62
  # Process prompt
 
76
  outputs = MODEL.generate(
77
  input_ids,
78
  max_length=min(max_length + len(input_ids[0]), 2048),
79
+ temperature=max(0.1, min(temperature, 1.0)), # Clamp temperature
80
+ top_k=max(1, min(top_k, 100)), # Clamp top_k
81
+ do_sample=True if temperature > 0 else False,
82
+ num_return_sequences=1,
83
  pad_token_id=TOKENIZER.pad_token_id,
84
  eos_token_id=TOKENIZER.eos_token_id,
 
85
  )
86
 
87
  # Decode and return
 
89
  return generated_text.strip()
90
 
91
  except Exception as e:
92
+ import traceback
93
+ traceback.print_exc()
94
+ return f"Error during text generation: {str(e)}"
95
 
96
  # Create Gradio interface
97
  iface = gr.Interface(
 
104
  ],
105
  outputs=gr.Textbox(label="Generated Text", lines=5),
106
  title="SmolLM2 Text Generator",
107
+ description="Generate text using the fine-tuned SmolLM2 model. Adjust parameters to control the generation.",
 
 
 
108
  examples=[
109
  ["Once upon a time", 100, 0.7, 50],
110
  ["The quick brown fox", 150, 0.8, 40],
 
111
  ],
112
  allow_flagging="never"
113
  )
114
 
115
+ # Initialize on startup
116
+ try:
117
+ initialize()
118
+ except Exception as e:
119
+ print(f"Warning: Model initialization failed: {str(e)}")
120
+ print("Model will be initialized on first request")
121
+
122
  if __name__ == "__main__":
123
  iface.launch()