datascientist22 committed
Commit b7471e0 · verified · 1 Parent(s): ffebd4e

Update app.py

Files changed (1): app.py +7 -2
app.py CHANGED
@@ -110,7 +110,12 @@ if 'chat_history' not in st.session_state:
 class CustomLanguageModel:
     def generate(self, prompt, context):
         # Implement logic to generate a response based on prompt and context
-        return f"Generated response based on prompt: '{prompt}' and context: '{context}'."
+        return f"Generated response: '{prompt}'. Key points from the context: '{self.summarize_context(context)}'."
+
+    def summarize_context(self, context):
+        # Summarize the context to extract key information
+        # You could use an NLP summarization model for a more sophisticated approach
+        return " ".join(context.split()[:100])  # Returning the first 100 words as a simple summary
 
 # Define a callable class for RAGPrompt
 class RAGPrompt:
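The new summarize_context is a simple first-100-words truncation. As its own comment notes, an NLP summarization model would be a more sophisticated approach. A minimal sketch of that swap, assuming the Hugging Face transformers summarization pipeline and the sshleifer/distilbart-cnn-12-6 checkpoint (both illustrative choices, not part of this commit):

from transformers import pipeline

class CustomLanguageModel:
    def __init__(self):
        # distilBART fine-tuned on CNN/DailyMail; any summarization
        # checkpoint with the same pipeline interface would also work
        self.summarizer = pipeline("summarization",
                                   model="sshleifer/distilbart-cnn-12-6")

    def generate(self, prompt, context):
        return f"Generated response: '{prompt}'. Key points from the context: '{self.summarize_context(context)}'."

    def summarize_context(self, context):
        # Keep short contexts as-is; abstractive summarization adds
        # little value below a few sentences
        if len(context.split()) < 30:
            return context
        # truncation=True guards against inputs longer than the model's window
        result = self.summarizer(context, max_length=60, min_length=10,
                                 do_sample=False, truncation=True)
        return result[0]["summary_text"]

Setting do_sample=False keeps the summary deterministic, so repeated Streamlit reruns with the same retrieved context produce the same response.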
 
@@ -163,7 +168,7 @@ if st.button("Submit Query"):
     # Apply the prompt directly to the data (no chaining using `|`)
     prompt_data = prompt({"question": query, "context": context})
 
-    # Generate the response using the language model
+    # Generate the response using the language model, focusing on the answer from the retrieved context
     result = custom_llm.generate(prompt_data["question"], prompt_data["context"])
 
     # Store query and response in session for chat history
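RAGPrompt is only applied in this hunk, not defined in it. A hypothetical minimal sketch of a callable class compatible with this call site (the real implementation elsewhere in app.py may differ):

class RAGPrompt:
    # Hypothetical sketch: the call site only requires that calling the
    # instance with a dict returns a dict exposing the "question" and
    # "context" keys consumed by custom_llm.generate(...)
    def __call__(self, data):
        return {"question": str(data["question"]).strip(),
                "context": str(data["context"]).strip()}

# Usage mirroring the hunk above:
prompt = RAGPrompt()
prompt_data = prompt({"question": "What does the app do?",
                      "context": "Retrieved passage text ..."})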