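"""Streamlit app that drafts a patent-oriented prompt for a user-supplied topic.

A local GGUF model is served through llama-cpp-python (via LangChain's LlamaCpp
wrapper), and generated tokens are streamed into the page as they arrive.
"""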
from langchain.callbacks.base import BaseCallbackHandler
from langchain_core.prompts import PromptTemplate
from langchain_community.llms import LlamaCpp

import streamlit as st

class StreamHandler(BaseCallbackHandler):
    """Callback that streams tokens into a Streamlit container as they arrive."""

    def __init__(self, container, initial_text=""):
        self.container = container
        self.text = initial_text

    def on_llm_new_token(self, token: str, **kwargs) -> None:
        # Append each new token and re-render the accumulated text.
        self.text += token
        self.container.markdown(self.text)

st.title("Prompt Engineer")

# Main chat form
with st.form("chat_form"):
    query = st.text_input("Enter the topic you want to generate a prompt for:")
    submit_button = st.form_submit_button("Send")

     
    template = """
    <s>[INST] <<SYS>>
    Act as a patent advisor by providing subject matter expertise on any topic. Provide detailed and elaborate answers
    <</SYS>>

    {text} [/INST]
    """
    response=""
    prompt = PromptTemplate(
        input_variables=["text"],
        template=template,
    )
    text = "Help me create a good prompt for the following: Information that is needed to file a US patent application for " + query
    #print(prompt.format(text=query))

    # Stream tokens into the page: chat_box is a placeholder that the
    # StreamHandler callback rewrites on every new token.
    model_path = "zephyr-7b-beta.Q4_K_M.gguf"
    chat_box = st.empty()
    stream_handler = StreamHandler(chat_box)
    
    # Loading the GGUF file on every Streamlit rerun is slow; wrapping this in
    # st.cache_resource (and passing callbacks at invoke time) would avoid it.
    llm = LlamaCpp(
        model_path=model_path,
        temperature=0.8,
        max_tokens=500,
        top_p=1,
        streaming=True,               # emit tokens one at a time
        callbacks=[stream_handler],   # route them to the Streamlit placeholder
        verbose=True,                 # required for callbacks to receive output
    )

if submit_button:
    # Tokens stream into chat_box via StreamHandler while the model runs;
    # invoke() also returns the full completion once generation finishes.
    output = llm.invoke(prompt.format(text=text))
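# To run this app locally (assuming this file is saved as app.py and the GGUF
# weights named above sit in the same directory):
#   pip install streamlit langchain langchain-community llama-cpp-python
#   streamlit run app.py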