Update app.py
app.py  CHANGED
@@ -18,15 +18,19 @@ user_input = st.text_input("Enter your message:")
 # Preprocess and generate response when the user hits Enter
 if user_input:
     if user_input.lower() == "quit":
-        st.stop()
+        st.stop()

-
-
-    logits = outputs.logits
-    predicted_class_id = logits.argmax(-1).item()
+    # Encode the user input
+    input_ids = tokenizer.encode(user_input, return_tensors='pt')

-
-
-
+    # Generate a response (adjust parameters for control)
+    output_sequences = model.generate(
+        input_ids=input_ids,
+        max_length=50,        # Example max response length
+        temperature=0.8,      # Controls creativity
+        # ... other generation parameters ...
+    )

-
+    # Decode the generated text and display
+    generated_text = tokenizer.decode(output_sequences[0], skip_special_tokens=True)
+    st.write(generated_text)
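
For context, below is a minimal sketch of how the changed section could fit into a complete app.py. Only the lines inside the if user_input: block come from this commit; the imports, the "gpt2" checkpoint, the st.cache_resource loader, the do_sample and pad_token_id arguments, and the page title are illustrative assumptions, not part of the diff.

# Hypothetical full app.py after this commit. The model choice ("gpt2"),
# the caching decorator, and the loading block are assumptions for
# illustration; only the body of `if user_input:` is taken from the diff.
import streamlit as st
from transformers import AutoModelForCausalLM, AutoTokenizer

@st.cache_resource  # cache the model/tokenizer across Streamlit reruns
def load_model():
    tokenizer = AutoTokenizer.from_pretrained("gpt2")        # assumed checkpoint
    model = AutoModelForCausalLM.from_pretrained("gpt2")      # assumed checkpoint
    return tokenizer, model

tokenizer, model = load_model()

st.title("Chatbot")                                           # assumed title
user_input = st.text_input("Enter your message:")

# Preprocess and generate response when the user hits Enter
if user_input:
    if user_input.lower() == "quit":
        st.stop()

    # Encode the user input
    input_ids = tokenizer.encode(user_input, return_tensors='pt')

    # Generate a response (adjust parameters for control)
    output_sequences = model.generate(
        input_ids=input_ids,
        max_length=50,        # counts prompt + generated tokens for causal LMs
        temperature=0.8,      # only takes effect when sampling is enabled
        do_sample=True,       # assumption: without this, generate() is greedy and ignores temperature
        pad_token_id=tokenizer.eos_token_id,  # assumption: GPT-2 has no pad token; silences a warning
    )

    # Decode the generated text and display
    generated_text = tokenizer.decode(output_sequences[0], skip_special_tokens=True)
    st.write(generated_text)

One note on the parameters shown in the diff: in transformers, temperature only influences generation when do_sample=True; with the default greedy decoding it is ignored, so a sampling flag like the one sketched above is typically needed for that setting to matter.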