Danielrahmai1991 committed
Commit 4173e6d · verified · 1 Parent(s): f856103

Update app.py

Files changed (1)
  1. app.py +76 -12
app.py CHANGED
@@ -43,8 +43,10 @@ gpu_llm = HuggingFacePipeline(
 
 )
 from langchain_core.prompts import PromptTemplate
+from langchain.chains import LLMChain
+from langchain.schema import HumanMessage, SystemMessage, AIMessage
 
-alpaca_prompt = """Below is an instruction that describes a task, paired with an input that provides further context. Write a response that appropriately completes the request.
+alpaca_prompt_simple = """Below is an instruction that describes a task, paired with an input that provides further context. Write a response that appropriately completes the request.
 
 ### Instruction:
 {question}
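The new imports wire the file up for LangChain's legacy LLMChain API; note the HumanMessage/SystemMessage/AIMessage schema imports are not referenced anywhere else in this diff. For context, gpu_llm is the HuggingFacePipeline constructed just above this hunk. A minimal sketch of that setup, assuming the usual from_model_id constructor (the real model id and generation kwargs live above line 43 and are not part of this commit):

# Sketch only: hypothetical reconstruction of the gpu_llm defined above the hunk.
# "gpt2" is a placeholder model id, and the import path varies by LangChain version.
from langchain_community.llms import HuggingFacePipeline

gpu_llm = HuggingFacePipeline.from_model_id(
    model_id="gpt2",                          # placeholder, not the app's real model
    task="text-generation",
    device=0,                                 # run the pipeline on the first GPU
    pipeline_kwargs={"max_new_tokens": 256},  # illustrative generation settings
)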
@@ -55,9 +57,65 @@ alpaca_prompt = """Below is an instruction that describes a task, paired with an
 ### Response:
 """
 
-prompt = PromptTemplate.from_template(alpaca_prompt)
+prompt = PromptTemplate.from_template(alpaca_prompt_simple)
+llm_chain_model = LLMChain(prompt=prompt, llm=gpu_llm.bind(skip_prompt=True))
+
+
+from langchain.prompts import ChatPromptTemplate, FewShotChatMessagePromptTemplate
+
+
+
+examples = [
+    {
+        "query": "what is forex?",
+        "answer": "Forex is an abbreviation for foreign exchange. It involves trading currencies from different countries with one another at the current market price."
+    },
+]
+example_prompt = ChatPromptTemplate.from_messages(
+    [
+        ("human", "{query}"),
+        ("ai", "{answer}"),
+    ]
+)
+
+
+few_shot_prompt = FewShotChatMessagePromptTemplate(
+    example_prompt=example_prompt,
+    examples=examples,
+)
+
+
+
+
+# with memory
+from langchain_core.prompts import PromptTemplate
+from langchain.memory import ConversationBufferMemory
 
-gpu_chain = prompt | gpu_llm.bind(stop=["\n\n"])
+alpaca_prompt_memory = """Below is an instruction that describes a task, paired with an input that provides further context. Write a response that appropriately completes the request.
+{chat_history}
+
+### Instruction:
+
+{question}
+
+
+
+### Input:
+
+### Response:
+"""
+
+prompt = PromptTemplate(
+    input_variables=["chat_history", "question"], template=alpaca_prompt_memory
+)
+memory = ConversationBufferMemory(memory_key="chat_history")
+
+llm_chain_memory = LLMChain(
+    llm=gpu_llm.bind(skip_prompt=True),
+    prompt=prompt,
+    verbose=True,
+    memory=memory,
+)
 
 # question = "give me suggestion about investment"
 
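The stateless path now builds its prompt from a one-example FewShotChatMessagePromptTemplate instead of the inline "Finiantial expert" template it replaces. A quick sketch of what that few-shot block renders to on its own, using names defined in the hunk above (the exact role prefixes follow LangChain's default stringification of ("human", ...) / ("ai", ...) messages):

# Sketch: render the few-shot block by itself to inspect it.
print(few_shot_prompt.format())
# Expected shape:
# Human: what is forex?
# AI: Forex is an abbreviation for foreign exchange. ...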
 
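The "with memory" chain threads prior turns through the {chat_history} slot via ConversationBufferMemory. A minimal sketch of the effect, assuming the chain builds as defined above:

# Sketch: each predict() call appends the exchange to the buffer, so the
# second question is answered with the first Q&A already inside the prompt.
llm_chain_memory.predict(question="what is forex?")
llm_chain_memory.predict(question="is it risky for a beginner?")

# The accumulated history is a single formatted string under "chat_history".
print(memory.load_memory_variables({})["chat_history"])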
@@ -64,16 +122,22 @@
 def greet(question, model_type):
     print(f"question is {question}")
     if model_type == "With memory":
-        response_of_llm = gpu_chain.invoke({"question": question})
-        print("creating model created")
+        print("With memory")
+        response_of_llm = llm_chain_memory.predict(question=question)
     else:
-        template = """You are the Finiantial expert:
-        ### Instruction:
-        {question}
-        ### Input:
-        ### Response:
-        """
-        response_of_llm = gpu_chain.invoke({"question": question})
+        print("Without memory")
+        query = question
+        final_prompt = ChatPromptTemplate.from_messages(
+            [
+                ("system", "You are a financial AI assistant"),
+                few_shot_prompt,
+                ("human", "{userInput}"),
+            ]
+        )
+        messages = final_prompt.format(userInput=query)
+
+        ai_out = llm_chain_model.invoke(messages)
+        response_of_llm = ai_out['text']
 
     print(f"out is: {response_of_llm}")
     return response_of_llm
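greet() now routes between the two chains on the model_type string. One quirk worth noting: in the else branch, the fully formatted chat transcript (messages) is passed to llm_chain_model, whose prompt exposes a single {question} variable, so the transcript is substituted into the Alpaca template as the "question" rather than sent as structured chat messages. A usage sketch, assuming the UI passes "With memory" / "Without memory" (the second string is an assumption inferred from the print call; any other value also falls through to the stateless path):

# Sketch: exercising both branches of greet().
print(greet("give me a suggestion about investment", "With memory"))
print(greet("what is forex?", "Without memory"))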