File size: 2,869 Bytes
b74ef1e
 
 
 
6fbb774
 
 
 
 
 
 
 
 
 
 
 
 
 
cc728ab
b74ef1e
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
28839ba
 
b74ef1e
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
import streamlit as st
import torch
from transformers import pipeline



# LICENSE.streamlit.Apachev2 	- 	Copyright (c) Streamlit Inc. (2018-2022) Snowflake Inc. (2022-2024) (https://github.com/streamlit/streamlit/blob/develop/LICENSE)

# LICENSE.torch.BSD-3			- 	Copyright (c) 2020, Philip Meier All rights reserved. (https://github.com/pmeier/light-the-torch/blob/main/LICENSE)

# LICENSE.transformers.Apachev2	- 	Copyright 2020 The HuggingFace Team. All rights reserved. (https://github.com/huggingface/huggingface_hub/blob/main/LICENSE)

# LICENSE.TinyLlama.Apachev2 	- 	@misc{zhang2024tinyllama,title={TinyLlama: An Open-Source Small Language Model}, author={Peiyuan Zhang and Guangtao Zeng and Tianduo Wang and Wei Lu},year={2024},eprint={2401.02385},archivePrefix={arXiv},primaryClass={cs.CL}} (https://github.com/jzhang38/TinyLlama/blob/main/LICENSE)


# LICENSE.json.LGPL				-	Copyright:    (c) 2017-2019 by Brad Jasper (c) 2012-2017 by Trevor Lohrbeer (https://github.com/bradjasper/ImportJSON/blob/master/LICENSE)

# LICENSE.pymupdf.AGPL			- 	Copyright (C) 2023 Artifex Software, Inc. (https://github.com/pymupdf/PyMuPDF/blob/main/COPYING)
@st.cache_resource(show_spinner=False)
def _load_pipeline():
    """Load the TinyLlama chat pipeline once and reuse it across reruns.

    Streamlit re-executes the whole script on every interaction; without
    caching, the ~1.1B-parameter model would be downloaded/instantiated on
    every button click. ``st.cache_resource`` keeps a single shared instance.
    """
    return pipeline(
        "text-generation",
        model="TinyLlama/TinyLlama-1.1B-Chat-v1.0",
        torch_dtype=torch.bfloat16,
        device_map="auto",
    )


def main():
    """Render the Nudge Generator demo page and handle one generate cycle.

    Flow: configure the page, show title/logo, read the user's message,
    and on "Generate Nudge" run the cached TinyLlama chat pipeline and
    display only the newly generated text (not the echoed prompt).
    """
    # Set up the page
    st.set_page_config(page_title="Nudge Generator Demo - tinyllama 1b", page_icon="orYx logo.png")

    # Title and logo
    col1, col2 = st.columns([3, 1])
    with col1:
        st.title("Nudge Generator Demo - tinyllama 1b")
    with col2:
        st.image("orYx logo.png")

    # Chat interface
    st.markdown("---")
    st.header("Nudge Response")

    # Input for user-provided message
    user_message = st.text_area("Enter your message:")

    if st.button("Generate Nudge"):
        # Use the stripped text consistently: the same value is both
        # validated and sent to the model (the original sent the raw,
        # possibly whitespace-padded, input).
        message = user_message.strip()
        if message:
            with st.spinner("Generating Nudge..."):
                # Cached: loaded once per process, not once per click.
                pipe = _load_pipeline()

                # System + user turns for the chat template.
                messages = [
                    {
                        "role": "system",
                        "content": "You are a personal corporate trainer that teaches me how to perform well in my organization.",
                    },
                    {"role": "user", "content": message},
                ]

                # Render the conversation into the model's prompt format.
                prompt = pipe.tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)

                # Generate the response.
                outputs = pipe(prompt, max_new_tokens=256, do_sample=True, temperature=0.7, top_k=50, top_p=0.95)

                # "generated_text" contains the prompt followed by the new
                # tokens; strip the prompt so only the model's reply is shown.
                generated = outputs[0]["generated_text"]
                response = generated[len(prompt):].strip() if generated.startswith(prompt) else generated

                # Display the response
                st.text_area("Chatbot Response:", response, height=200)
        else:
            st.warning("Please enter a message to get a response.")

# Script entry point: run the Streamlit app when executed directly.
if __name__ == "__main__":
    main()