TheBobBob committed (verified)
Commit b57ab6f · Parent: 47a1bbf

Update app.py

Files changed (1): app.py (+28 −16)
app.py CHANGED

@@ -2,9 +2,9 @@ import os
 import requests
 import tellurium as te
 import tempfile
+import ollama
 import streamlit as st
 from langchain_text_splitters import CharacterTextSplitter
-from transformers import pipeline
 import chromadb

 # Constants and global variables
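The import swap above replaces the Hugging Face `transformers` pipelines with the `ollama` client, which talks to a locally running Ollama server instead of loading models in-process. A minimal sketch of the new dependency's contract, assuming the server is up and the llama3 model has already been pulled with `ollama pull llama3`:

```python
import ollama

# Assumes a local Ollama server is running and `ollama pull llama3` has been
# run beforehand; otherwise this call raises a connection or model error.
result = ollama.generate(model="llama3", prompt="Say hello in one sentence.")

# The generated text is returned under the "response" key, which is why the
# diff below reads response['response'] and response.get('response', ...).
print(result["response"])
```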
@@ -16,10 +16,6 @@ LOCAL_DOWNLOAD_DIR = tempfile.mkdtemp()
 cached_data = None
 db = None

-# Initialize Hugging Face model pipelines
-summarizer = pipeline("summarization", model="facebook/bart-large-cnn")
-llm = pipeline("text-generation", model="gpt2")
-
 def fetch_github_json():
     url = f"https://api.github.com/repos/{GITHUB_OWNER}/{GITHUB_REPO_CACHE}/contents/{BIOMODELS_JSON_DB_PATH}"
     headers = {"Accept": "application/vnd.github+json"}
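With the module-level pipelines gone, model calls now happen inside the functions that need them, so nothing heavyweight loads at import time. For context, `fetch_github_json` hits the GitHub contents API, which returns the file body base64-encoded; the function's full body is truncated in this hunk, so the completion below is a plausible sketch under that assumption, not the commit's actual code:

```python
import base64
import json
import requests

GITHUB_OWNER = "owner"              # placeholders; the commit defines these
GITHUB_REPO_CACHE = "repo"          # as module-level constants
BIOMODELS_JSON_DB_PATH = "db.json"

def fetch_github_json():
    # Sketch only: the contents API wraps the file in JSON with a
    # base64-encoded "content" field that must be decoded before parsing.
    url = f"https://api.github.com/repos/{GITHUB_OWNER}/{GITHUB_REPO_CACHE}/contents/{BIOMODELS_JSON_DB_PATH}"
    headers = {"Accept": "application/vnd.github+json"}
    resp = requests.get(url, headers=headers, timeout=30)
    resp.raise_for_status()
    return json.loads(base64.b64decode(resp.json()["content"]))
```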
@@ -144,15 +140,20 @@ def create_vector_db(final_items):
         metadata={"hnsw:space": "cosine"}
     )
     documents = []
-    print("VectorDB successfully created.")
+
     for item in final_items:
         prompt = f"""
-        Summarize the following segment of Antimony:
-        {item}
+        Summarize the following segment of Antimony in a clear and concise manner:
+        1. Provide a detailed summary using a limited number of words.
+        2. Maintain all original values and include any mathematical expressions or values in full.
+        3. Ensure that all variable names and their values are clearly presented.
+        4. Write the summary in paragraph format, putting an emphasis on clarity and completeness.
+
+        Here is the Antimony segment to summarize: {item}
         """
-        response = summarizer(prompt, max_length=150, min_length=30, do_sample=False)
-        summary = response[0]['summary_text']
+        response = ollama.generate(model="llama3", prompt=prompt)
+        summary = response['response']
         documents.append(summary)

     if final_items:
         db.add(
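The summarization loop now feeds each Antimony segment through llama3 and collects the returned text in `documents`, which `db.add` then stores in the ChromaDB collection created with cosine distance. A self-contained sketch of that round trip, with a hypothetical collection name and toy segments standing in for the real inputs:

```python
import chromadb
import ollama

client = chromadb.Client()
collection = client.create_collection(
    name="antimony_summaries",          # hypothetical; the real name isn't shown
    metadata={"hnsw:space": "cosine"},  # cosine distance, as in the commit
)

final_items = ["S1 -> S2; k1*S1", "k1 = 0.5"]  # toy Antimony segments
documents = []
for item in final_items:
    summary = ollama.generate(
        model="llama3",
        prompt=f"Summarize the following segment of Antimony: {item}",
    )["response"]
    documents.append(summary)

# Each document needs a unique id; ChromaDB embeds the text automatically
# with its default embedding function.
collection.add(documents=documents, ids=[f"doc_{i}" for i in range(len(documents))])
```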
@@ -170,14 +171,25 @@ def generate_response(db, query_text, previous_context):
     if not query_results.get('documents'):
         return "No results found."

-    best_recommendation = query_results['documents'][0]
+    best_recommendation = query_results['documents']

     prompt_template = f"""
-    Using the context below, answer the following question: {query_text}
-    Context: {previous_context} {best_recommendation}
+    Using the context provided below, answer the following question. If the information is insufficient to answer the question, please state that clearly.
+
+    Context:
+    {previous_context} {best_recommendation}
+
+    Instructions:
+    1. Cross-Reference: Use all provided context to define variables and identify any unknown entities.
+    2. Mathematical Calculations: Perform any necessary calculations based on the context and available data.
+    3. Consistency: Remember and incorporate previous responses if the question is related to earlier information.
+
+    Question:
+    {query_text}
     """
-    response = llm(prompt_template, max_length=150)
-    final_response = response[0]['generated_text']
+    response = ollama.generate(model="llama3", prompt=prompt_template)
+    final_response = response.get('response', 'No response generated')
     return final_response

 def streamlit_app():
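`generate_response` now interpolates every retrieved document into the prompt (note the dropped `[0]`) along with the running conversation context, and instructs the model to say so when the context is insufficient. A sketch of the retrieval-then-generate round trip, continuing from the collection built above; the query text and `n_results` value are illustrative, not from the commit:

```python
# Continues from the `collection` created in the previous sketch.
query_text = "What is the value of k1?"
query_results = collection.query(query_texts=[query_text], n_results=3)

# 'documents' is a list of result lists, one per query text; the commit
# interpolates the whole structure into the prompt as-is.
best_recommendation = query_results['documents']

prompt = (
    "Using the context provided below, answer the question.\n"
    f"Context: {best_recommendation}\n"
    f"Question: {query_text}"
)
answer = ollama.generate(model="llama3", prompt=prompt).get('response', 'No response generated')
print(answer)
```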
 