Vineedhar committed on
Commit
cc728ab
·
verified ·
1 Parent(s): 075fa02

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +18 -46
app.py CHANGED
@@ -1,50 +1,22 @@
1
- import streamlit as st
2
- import torch
3
- from transformers import pipeline
4
 
5
@st.cache_resource
def _load_pipeline():
    """Load the TinyLlama text-generation pipeline once per process.

    Cached with st.cache_resource so the 1.1B-parameter model is not
    re-downloaded and re-initialized on every button click — Streamlit
    reruns the entire script on each user interaction.
    """
    return pipeline(
        "text-generation",
        model="TinyLlama/TinyLlama-1.1B-Chat-v1.0",
        torch_dtype=torch.bfloat16,
        device_map="auto",
    )


def main():
    """Render the Nudge Generator demo page and handle one generation request."""
    # Set up the page
    st.set_page_config(page_title="Nudge Generator Demo - tinyllama 1b", page_icon="orYx logo.png")

    # Title and logo side by side
    col1, col2 = st.columns([3, 1])
    with col1:
        st.title("Nudge Generator Demo - tinyllama 1b")
    with col2:
        st.image("orYx logo.png")

    # Chat interface
    st.markdown("---")
    st.header("Nudge Response")

    # Input for user-provided message
    user_message = st.text_area("Enter your message:")

    if st.button("Generate Nudge"):
        if user_message.strip():
            with st.spinner("Generating Nudge..."):
                # Cached: only the first call pays the model-load cost.
                pipe = _load_pipeline()

                # System prompt frames the model as a corporate trainer.
                messages = [
                    {
                        "role": "system",
                        "content": "You are a personal corporate trainer that teaches me how to perform well in my organization.",
                    },
                    {"role": "user", "content": user_message},
                ]

                # Build the chat-formatted prompt string the model expects.
                prompt = pipe.tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)

                # Sampled generation, capped at 256 new tokens.
                outputs = pipe(prompt, max_new_tokens=256, do_sample=True, temperature=0.7, top_k=50, top_p=0.95)

                # Display the response
                st.text_area("Chatbot Response:", outputs[0]["generated_text"], height=200)
        else:
            st.warning("Please enter a message to get a response.")
48
 
49
  if __name__ == "__main__":
50
- main()
 
1
import gradio as gr


# Load the model
# NOTE(review): gr.load pulls the hosted TinyLlama model/Space as a prebuilt
# interface at import time — presumably a network call happens as a
# module-level side effect; confirm this is acceptable at startup.
model_interface = gr.load("TinyLlama/TinyLlama-1.1B-Chat-v1.0")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
5
 
6
def main():
    """Build and launch the TinyLlama demo page: title, logo, and the
    prebuilt hosted-model interface arranged in a single row."""
    with gr.Blocks() as page:
        # Page heading
        gr.Markdown("# Tiny llama 1B Demo")

        # Company logo, rendered as a static (non-editable) image.
        gr.Image("orYx logo.png", elem_id="logo", show_label=False, interactive=False)

        # Embed the preloaded model interface inside its own row.
        with gr.Row():
            model_interface.render()

        page.launch()
 
 
 
 
 
20
 
21
  if __name__ == "__main__":
22
+ main()