Spaces:
Paused
Paused
Llama 3 8B fine-tuned summarization model added
Browse files- app.py +9 -4
- requirements.txt +3 -1
app.py
CHANGED
@@ -8,7 +8,7 @@ def load_model(model_name):
|
|
8 |
return pipeline("summarization", model="ak2603/mt5-small-synthetic-data-plus-translated")
|
9 |
# Add space for other models here
|
10 |
elif model_name == "Llama 3.2":
|
11 |
-
return
|
12 |
elif model_name == "Llama 7b Instruct":
|
13 |
return None # Placeholder for future implementation
|
14 |
else:
|
@@ -75,12 +75,17 @@ with col2:
|
|
75 |
else:
|
76 |
with st.spinner("Generating summary..."):
|
77 |
try:
|
78 |
-
|
|
|
79 |
input_text,
|
80 |
-
max_length=150,
|
81 |
do_sample=True,
|
82 |
repetition_penalty=1.5
|
83 |
-
)[0]
|
|
|
|
|
|
|
|
|
84 |
|
85 |
st.success("**Generated Summary:**")
|
86 |
st.write(result)
|
|
|
8 |
return pipeline("summarization", model="ak2603/mt5-small-synthetic-data-plus-translated")
|
9 |
# Add space for other models here
|
10 |
elif model_name == "Llama 3.2":
|
11 |
+
return pipeline("text-generation", model="Walid777/llama3-8b-emails-summarization")
|
12 |
elif model_name == "Llama 7b Instruct":
|
13 |
return None # Placeholder for future implementation
|
14 |
else:
|
|
|
75 |
else:
|
76 |
with st.spinner("Generating summary..."):
|
77 |
try:
|
78 |
+
# Generate summary
|
79 |
+
summary_output = summarizer(
|
80 |
input_text,
|
81 |
+
max_length=150,
|
82 |
do_sample=True,
|
83 |
repetition_penalty=1.5
|
84 |
+
)[0]
|
85 |
+
|
86 |
+
# Dynamically select key based on pipeline task
|
87 |
+
result_key = 'summary_text' if summarizer.task == 'summarization' else 'generated_text'
|
88 |
+
result = summary_output[result_key]
|
89 |
|
90 |
st.success("**Generated Summary:**")
|
91 |
st.write(result)
|
requirements.txt
CHANGED
@@ -1,4 +1,6 @@
|
|
1 |
streamlit
|
2 |
transformers
|
3 |
torch
|
4 |
-
sentencepiece
|
|
|
|
|
|
1 |
streamlit
|
2 |
transformers
|
3 |
torch
|
4 |
+
sentencepiece
|
5 |
+
bitsandbytes
|
6 |
+
accelerate>=0.26.0
|