warhawkmonk committed on
Commit
10719a6
·
verified ·
1 Parent(s): 5f9c583

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +34 -1
app.py CHANGED
@@ -11,6 +11,7 @@ from langchain.schema import Document
11
  from sentence_transformers import SentenceTransformer,util
12
  from streamlit_image_select import image_select
13
  import os
 
14
  import PyPDF2
15
  import requests
16
  from streamlit_navigation_bar import st_navbar
@@ -32,7 +33,7 @@ def consume_llm_api(prompt):
32
  """
33
  Sends a prompt to the LLM API and processes the streamed response.
34
  """
35
- url = "https://hot-eyes-shop.loca.lt/api/llm-response"
36
  headers = {"Content-Type": "application/json"}
37
  payload = {"prompt": prompt}
38
 
@@ -477,7 +478,39 @@ with column1:
477
  negative_prompt="the black masked area"
478
 
479
  # run=st.button("run_experiment")
 
 
 
 
 
 
 
480
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
481
  if bg_doc and prompt:
482
  query_embedding = model.encode([prompt])
483
  retrieved_chunks = max([(util.cos_sim(match[0],query_embedding),match[-1])for match in vector_store])[-1]
 
11
  from sentence_transformers import SentenceTransformer,util
12
  from streamlit_image_select import image_select
13
  import os
14
+ import fitz
15
  import PyPDF2
16
  import requests
17
  from streamlit_navigation_bar import st_navbar
 
33
  """
34
  Sends a prompt to the LLM API and processes the streamed response.
35
  """
36
+ url = "https://wise-eagles-send.loca.lt/api/llm-response"
37
  headers = {"Content-Type": "application/json"}
38
  payload = {"prompt": prompt}
39
 
 
478
  negative_prompt="the black masked area"
479
 
480
  # run=st.button("run_experiment")
481
+ if bg_doc:
482
+ if len(dictionary['every_prompt_with_val'])==0:
483
+ query_embedding = model.encode(["something"])
484
+ else:
485
+
486
+ query_embedding = model.encode([dictionary['every_prompt_with_val'][-1][0]])
487
+ retrieved_chunks = max([(util.cos_sim(match[0],query_embedding),match[-1])for match in vector_store])[-1]
488
 
489
+
490
+
491
+ with implementation:
492
+
493
+ text_lookup=retrieved_chunks
494
+ pages=[]
495
+ with fitz.open("temp.pdf") as doc:
496
+ page_number = st.sidebar.number_input(
497
+ "Page number", min_value=1, max_value=doc.page_count, value=1, step=1
498
+
499
+
500
+ )
501
+ for page_no in range(doc.page_count):
502
+ pages.append(doc.load_page(page_no - 1))
503
+
504
+ # areas = pages[page_number-1].search_for(text_lookup)
505
+ with st.container(height=int(screen_height//1.8)):
506
+ for pg_no in pages[::-1]:
507
+ areas = pg_no.search_for(text_lookup)
508
+ for area in areas:
509
+ pg_no.add_rect_annot(area)
510
+
511
+ pix = pg_no.get_pixmap(dpi=100).tobytes()
512
+ st.image(pix,use_column_width=True)
513
+
514
  if bg_doc and prompt:
515
  query_embedding = model.encode([prompt])
516
  retrieved_chunks = max([(util.cos_sim(match[0],query_embedding),match[-1])for match in vector_store])[-1]