Vineedhar committed on
Commit b74ef1e · verified · 1 Parent(s): e7b358d

Update app.py

Files changed (1)
  1. app.py +47 -30
app.py CHANGED
@@ -1,33 +1,50 @@
-import gradio as gr
-
-# Load the model from Hugging Face
-def load_model():
-    try:
-        model_interface = gr.load("huggingface/TinyLlama/TinyLlama-1.1B-Chat-v1.0")
-        return model_interface
-    except ValueError as e:
-        print(f"Error loading the model: {e}")
-        return None
-
-# Create a Gradio interface with custom title and logo
+import streamlit as st
+import torch
+from transformers import pipeline
+
 def main():
-    model_interface = load_model()
-    if model_interface is None:
-        print("Failed to load the model. Exiting.")
-        return
-
-    with gr.Blocks() as demo:
-        # Add the title
-        gr.Markdown("# TinyLlama Demo")
-
-        # Add the logo
-        gr.Image("orYx logo.png", elem_id="logo", show_label=False, interactive=False)
-
-        # Embed the model interface inside a Row
-        with gr.Row():
-            model_interface.render()  # Use render to include the prebuilt interface
-
-    demo.launch()
+    # Set up the page
+    st.set_page_config(page_title="Nudge Generator Demo - tinyllama 1b", page_icon="orYx logo.png")
+
+    # Title and logo
+    col1, col2 = st.columns([3, 1])
+    with col1:
+        st.title("Nudge Generator Demo - tinyllama 1b")
+    with col2:
+        st.image("orYx logo.png")
+
+    # Chat interface
+    st.markdown("---")
+    st.header("Nudge Response")
+
+    # Input for user-provided message
+    user_message = st.text_area("Enter your message:")
+
+    if st.button("Generate Nudge"):
+        if user_message.strip():
+            with st.spinner("Generating Nudge..."):
+                # Load the pipeline
+                pipe = pipeline("text-generation", model="TinyLlama/TinyLlama-1.1B-Chat-v1.0", torch_dtype=torch.bfloat16, device_map="auto")
+
+                # Define the message structure
+                messages = [
+                    {
+                        "role": "system",
+                        "content": "You are a personal corporate trainer that teaches me how to perform well in my organization.",
+                    },
+                    {"role": "user", "content": user_message},
+                ]
+
+                # Generate the prompt
+                prompt = pipe.tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
+
+                # Generate the response
+                outputs = pipe(prompt, max_new_tokens=256, do_sample=True, temperature=0.7, top_k=50, top_p=0.95)
+
+                # Display the response
+                st.text_area("Chatbot Response:", outputs[0]["generated_text"], height=200)
+        else:
+            st.warning("Please enter a message to get a response.")
 
 if __name__ == "__main__":
-    main()
+    main()
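
Note on the new generation path (not part of the commit): the pipeline above is constructed inside the button handler, so the 1.1B model is reloaded on every click. A minimal sketch of loading it once with Streamlit's st.cache_resource, assuming Streamlit >= 1.18 and the same model id; load_pipe is a hypothetical helper name:

import streamlit as st
import torch
from transformers import pipeline

@st.cache_resource
def load_pipe():
    # Hypothetical helper, not in the commit: builds the pipeline once per
    # process and reuses it across Streamlit script reruns.
    return pipeline(
        "text-generation",
        model="TinyLlama/TinyLlama-1.1B-Chat-v1.0",
        torch_dtype=torch.bfloat16,
        device_map="auto",
    )

pipe = load_pipe()  # would replace the pipeline(...) call inside the spinner block

Either version of the app is started the same way from the Space root: streamlit run app.py.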