awacke1 committed on
Commit 7c520c0 · verified · 1 Parent(s): fe53471

Create app.py

Files changed (1)
  1. app.py +182 -0
app.py ADDED
@@ -0,0 +1,182 @@
import streamlit as st
import streamlit.components.v1 as components
from transformers import pipeline
from diffusers import StableDiffusionPipeline
from datasets import load_dataset
from peft import PeftConfig
from accelerate import Accelerator
from optimum.onnxruntime import ORTModelForSequenceClassification
import torch
import time

# Cache resource-intensive models
@st.cache_resource
def load_diffuser_model():
    return StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")

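# NOTE (reviewer sketch, not part of the original commit): the checkpoint above is several GB
# and loads onto the CPU by default; on a GPU Space you could move it after loading, e.g.:
#   pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
#   pipe = pipe.to("cuda") if torch.cuda.is_available() else pipe
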
# Sidebar navigation
st.sidebar.title("🤗 Libraries Demo")
st.sidebar.markdown("Explore text 📝, images 🖼️, and model ops 🔗 with Hugging Face and Arcee!")
page = st.sidebar.selectbox(
    "Choose a Section",
    [
        "🏠 Home",
        "🔄 Workflow",
        "📝 Transformers",
        "🖼️ Diffusers",
        "📊 Datasets",
        "⚙️ PEFT",
        "🚀 Accelerate",
        "⚡ Optimum",
        "📚 DistillKit",
        "🔗 MergeKit",
        "❄️ Spectrum"
    ],
    help="Select a library to explore!"
)

# Mermaid graph for DistillKit, MergeKit, and Spectrum workflows
mermaid_code = """
graph TD
    subgraph DistillKit
        A1[Load Teacher Model] --> B1[Load Student Model]
        B1 --> C1[Configure Distillation]
        C1 --> D1[Perform Distillation]
        D1 --> E1[Evaluate Model]
    end
    subgraph MergeKit
        A2[Select Models] --> B2[Choose Merge Method]
        B2 --> C2[Set Parameters]
        C2 --> D2[Merge Models]
        D2 --> E2[Save Merged Model]
    end
    subgraph Spectrum
        A3[Load Model] --> B3[Analyze Layers]
        B3 --> C3[Generate Config]
        C3 --> D3[Apply Freezing]
        D3 --> E3[Train/Evaluate Model]
    end
"""

# Home Page
if page == "🏠 Home":
    st.title("Hugging Face & Arcee Libraries Demo 🌟")
    st.markdown("""
Welcome to an interactive demo of powerful libraries for text, image, and model processing!
- **📝 Text**: Analyze or generate text with Transformers.
- **🖼️ Images**: Create visuals with Diffusers.
- **🔗 Models**: Distill, merge, and optimize with Arcee's DistillKit, MergeKit, and Spectrum.

Navigate via the sidebar to explore each library!
""")

# Workflow Page with Mermaid Graph
elif page == "🔄 Workflow":
    st.header("🔄 Workflows: DistillKit, MergeKit, Spectrum")
    st.markdown("See how inputs flow to outputs in Arcee's libraries with this Mermaid graph:")
    components.html(f"""
    <div id="mermaid"></div>
    <script src="https://cdn.jsdelivr.net/npm/mermaid/dist/mermaid.min.js"></script>
    <script>
        mermaid.initialize({{ startOnLoad: true }});
        const mermaidCode = `{mermaid_code}`;
        // mermaid v10+ removed the callback form of render; it now returns a promise
        mermaid.render('graph', mermaidCode).then(({{ svg }}) => {{
            document.getElementById('mermaid').innerHTML = svg;
        }});
    </script>
    """, height=600)

# Transformers Section
elif page == "📝 Transformers":
    st.header("📝 Transformers")
    st.markdown("Process text with pre-trained models.")
    task = st.selectbox("Task", ["Sentiment Analysis", "Text Generation"])
    text = st.text_area("Input Text", "")
    if st.button("Run") and text:
        with st.spinner("Processing..."):
            if task == "Sentiment Analysis":
                result = pipeline("sentiment-analysis")(text)
                st.write(f"Result: {result[0]['label']} (Score: {result[0]['score']:.2f})")
            else:
                result = pipeline("text-generation")(text, max_length=50)[0]['generated_text']
                st.write(f"Generated: {result}")

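# NOTE (reviewer sketch, not part of the original commit): each button click above builds a
# fresh pipeline, which reloads the default checkpoint. A cached helper, assuming the same
# default models, would avoid that, e.g.:
#   @st.cache_resource
#   def get_pipeline(task_name: str):
#       return pipeline(task_name)
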
# Diffusers Section
elif page == "🖼️ Diffusers":
    st.header("🖼️ Diffusers")
    st.markdown("Generate images from text.")
    prompt = st.text_input("Prompt", "A futuristic city")
    if st.button("Generate"):
        with st.spinner("Generating..."):
            pipe = load_diffuser_model()
            image = pipe(prompt).images[0]
            st.image(image, caption=prompt)

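# NOTE (reviewer sketch, not part of the original commit): on a CPU-only Space the default
# 50-step generation can take several minutes; fewer steps trade quality for speed, e.g.:
#   image = pipe(prompt, num_inference_steps=20).images[0]
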
# Datasets Section
elif page == "📊 Datasets":
    st.header("📊 Datasets")
    st.markdown("Load and explore datasets.")
    dataset = st.selectbox("Dataset", ["imdb", "squad"])
    if st.button("Load"):
        data = load_dataset(dataset, split="train[:5]")
        st.write(data)

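# NOTE (reviewer comment, not part of the original commit): the split string "train[:5]" uses
# the datasets slicing syntax, so only the first five training examples are loaded into the
# returned Dataset rather than the full split.
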
# PEFT Section
elif page == "⚙️ PEFT":
    st.header("⚙️ PEFT")
    st.markdown("Parameter-efficient fine-tuning.")
    text = st.text_area("Text", "")
    if st.button("Classify") and text:
        st.write("Simulated PEFT classification: Positive")

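# NOTE (reviewer sketch, not part of the original commit): the PEFT result above is simulated
# and the imported PeftConfig is unused. A minimal real setup, assuming a LoRA adapter on a
# loaded Transformers model (base_model is hypothetical here), would look roughly like:
#   from peft import LoraConfig, get_peft_model
#   lora_config = LoraConfig(r=8, lora_alpha=16, task_type="SEQ_CLS")
#   peft_model = get_peft_model(base_model, lora_config)
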
# Accelerate Section
elif page == "🚀 Accelerate":
    st.header("🚀 Accelerate")
    st.markdown("Optimize across devices.")
    text = st.text_area("Text", "")
    if st.button("Analyze") and text:
        accelerator = Accelerator()
        result = pipeline("sentiment-analysis")(text)
        st.write(f"Result: {result[0]['label']} (Score: {result[0]['score']:.2f})")

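# NOTE (reviewer comment, not part of the original commit): the Accelerator instance above is
# created only to show the API; the pipeline call does not use it. In a training script it
# would typically wrap the components (names here are illustrative), e.g.:
#   model, optimizer, dataloader = accelerator.prepare(model, optimizer, dataloader)
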
# Optimum Section
elif page == "⚡ Optimum":
    st.header("⚡ Optimum")
    st.markdown("Hardware-accelerated inference.")
    text = st.text_area("Text", "")
    if st.button("Classify") and text:
        st.write("Simulated Optimum result: Positive")

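# NOTE (reviewer sketch, not part of the original commit): the Optimum result above is simulated
# and the imported ORTModelForSequenceClassification is unused. A real ONNX Runtime path,
# assuming the distilbert SST-2 checkpoint, would look roughly like:
#   from transformers import AutoTokenizer
#   model_id = "distilbert-base-uncased-finetuned-sst-2-english"
#   ort_model = ORTModelForSequenceClassification.from_pretrained(model_id, export=True)
#   tokenizer = AutoTokenizer.from_pretrained(model_id)
#   onnx_classifier = pipeline("text-classification", model=ort_model, tokenizer=tokenizer)
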
# DistillKit Section
elif page == "📚 DistillKit":
    st.header("📚 DistillKit: Model Distillation")
    st.markdown("Distill large models into smaller, efficient ones. Here are the top 5 functions:")

    # 1. Load teacher model
    teacher = st.selectbox("Teacher Model", ["arcee-ai/Arcee-Spark", "bert-base-uncased"])
    st.write(f"1. Loaded teacher: {teacher}")

    # 2. Load student model
    student = st.selectbox("Student Model", ["Qwen/Qwen2-1.5B", "distilbert-base-uncased"])
    st.write(f"2. Loaded student: {student}")

    # 3. Configure distillation
    temp = st.slider("Temperature", 1.0, 5.0, 2.0)
    alpha = st.slider("Alpha", 0.0, 1.0, 0.5)
    st.write(f"3. Config: Temp={temp}, Alpha={alpha}")

    # 4. Perform distillation (simulated)
    if st.button("Distill"):
        with st.spinner("Distilling..."):
            time.sleep(2)
            st.success("4. Distillation complete!")

    # 5. Evaluate distilled model
    st.write("5. Evaluating...")
    metrics = {"accuracy": 0.85, "loss": 0.12}
    st.write(f"Metrics: {metrics}")

    st.markdown("""
**How It Works:**
DistillKit compresses a teacher model into a student model using distillation techniques.
```python
config = {"teacher": "arcee-ai/Arcee-Spark", "student": "Qwen/Qwen2-1.5B", "temp": 2.0, "alpha": 0.5}