from langchain.llms import HuggingFacePipeline, LlamaCpp, CTransformers
# from langchain.callbacks.base import BaseCallbackHandler
import streamlit as st
from streamlit.components.v1 import html
import streamlit.components.v1 as components
from streamlit_chat import message
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
from langchain.callbacks import StreamlitCallbackHandler
# st_callback = StreamlitCallbackHandler(st.container())
import textwrap

st.title("Affine-LocalGPT")

history=[]

# Default Sys Prompt
DEFAULT_SYSTEM_PROMPT = "You are a helpful, respectful and honest assistant. Always answer as helpfully as possible, while being safe.  Your answers should not include any harmful, unethical, racist, sexist, toxic, dangerous, or illegal content. Please ensure that your responses are socially unbiased and positive in nature. If a question does not make any sense, or is not factually coherent, explain why instead of answering something not correct. If you don't know the answer to a question, please don't share false information."

with st.sidebar:
    model_name = st.selectbox("Select Model :-", ['Llama 7B', 'Llama 13B'])
    temperature = st.slider("Temperature :-", 0.0, 1.0, 0.1)
    top_p = st.slider("top_p :-", 0.0, 1.0, 0.95)
    top_k = st.slider("top_k :-", 0, 100, 50)
    DEFAULT_SYSTEM_PROMPT = st.text_area("System Prompt :-", DEFAULT_SYSTEM_PROMPT, height=400)
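
# Note: top_p and top_k are collected in the sidebar but are never forwarded to the
# model constructed below, so changing those sliders currently has no effect on
# generation. They could presumably be passed through to CTransformers as generation
# settings if that behaviour is wanted.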

# Load the selected model
if model_name == "Llama 7B":
    print("Loading Llama 7B model")
    model_path = "llama-2-7b-chat.ggmlv3.q4_0.bin"
else:
    print("Loading Llama 13B model")
    model_path = "llama-2-13b-chat.ggmlv3.q2_K.bin"

# prompt special tokens
B_INST, E_INST = "[INST]", "[/INST]"
B_SYS, E_SYS = "<<SYS>>\n", "\n<</SYS>>\n\n"


# create the custom prompt
def get_prompt(
    message: str, chat_history: list[tuple[str, str]], system_prompt: str
) -> str:
    texts = [f"{B_INST} {B_SYS}{system_prompt}{E_SYS}"]
    for user_input, response in chat_history:
        texts.append(f"{user_input.strip()} {E_INST} {response.strip()} </s><s> {B_INST} ")
    texts.append(f"{message.strip()} {E_INST}")
    return "".join(texts)
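
# Illustrative example of the prompt this produces (no chat history):
#   get_prompt("Hi", [], "Be brief")
#   -> "[INST] <<SYS>>\nBe brief\n<</SYS>>\n\nHi [/INST]"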

## Load the local Llama 2 model through CTransformers (GGML format)
def llama_model(model_path=None, model_type="llama", max_new_tokens=1024, temperature=None):
    llm = CTransformers(
        model=model_path,
        model_type=model_type,
        max_new_tokens=max_new_tokens,
        temperature=temperature,
        streaming=True,
        callbacks=[StreamingStdOutCallbackHandler()],
    )
    return llm
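
# Note: StreamingStdOutCallbackHandler streams tokens to the terminal's stdout only,
# not to the Streamlit page; the chat UI below approximates streaming by iterating
# over the completed response string character by character.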

print(f"{model_name} Model Loading start")
model=llama_model(model_path=model_path,temperature=temperature)
print(f"{model_name}Load Model Successfully.")

# if 'prompts' not in st.session_state:
#     st.session_state.prompts = []
# if 'responses' not in st.session_state:
#     st.session_state.responses = []

if "messages" not in st.session_state:
    st.session_state.messages = []

# Replay prior chat turns stored in session state (named msg to avoid shadowing the
# `message` component imported from streamlit_chat above)
for msg in st.session_state.messages:
    with st.chat_message(msg["role"]):
        st.markdown(msg["content"])

if prompt := st.chat_input("What is up?"):
    st.session_state.messages.append({"role": "user", "content": prompt})
    with st.chat_message("user"):
        st.markdown(prompt)
        final_prompt = get_prompt(prompt, history, DEFAULT_SYSTEM_PROMPT)

    with st.chat_message("assistant"):
        message_placeholder = st.empty()
        full_response = ""
        # The call returns the full completion as a string; iterating it yields one
        # character at a time, which drives a simple typing effect in the placeholder.
        for chunk in model(final_prompt):
            full_response += chunk
            message_placeholder.markdown(full_response + "▌")
        wrapped_text = textwrap.fill(full_response, width=100)
        message_placeholder.markdown(wrapped_text)
    st.session_state.messages.append(
        {"role": "assistant", "content": full_response}
    )
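
# Note: `history` is reset to [] on every rerun, so get_prompt only ever sees the
# latest user message. If multi-turn context is wanted, the (user, assistant) pairs
# could be rebuilt from st.session_state.messages before calling get_prompt, e.g.
# (illustrative sketch, assuming strictly alternating user/assistant messages):
#   history = [
#       (st.session_state.messages[i]["content"], st.session_state.messages[i + 1]["content"])
#       for i in range(0, len(st.session_state.messages) - 1, 2)
#   ]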


# while True:
#     message=input("Enter your query here...")
#     prompt=get_prompt(message,history,DEFAULT_SYSTEM_PROMPT)
#     ans=model.predict(prompt)
#     print(ans)



# def send_click():
#     print("Start..")
#     if st.session_state.user != '':
#         message = st.session_state.user
#         if message:
#             prompt=get_prompt(message,history,DEFAULT_SYSTEM_PROMPT)
#             response=model.predict(prompt)
#             st.session_state.prompts.append(message)
#             st.session_state.responses.append(response)

# st.text_input("Ask your query here :", key="user")
# st.button("Send", on_click=send_click)

# if st.session_state.prompts:
#     for i in range(len(st.session_state.responses)-1, -1, -1):
#         message(st.session_state.responses[i], key=str(i), seed='Milo')
#         message(st.session_state.prompts[i], is_user=True, key=str(i) + '_user', seed=83)

# if st.session_state.messages[-1]["role"] != "assistant":
#     with st.chat_message("assistant"):
#         with st.spinner("Thinking..."):
#             response = generate_response(prompt) 
#             st.write(response) 
#     message = {"role": "assistant", "content": response}
#     st.session_state.messages.append(message)