warhawkmonk committed
Commit e878e23 · verified · 1 Parent(s): 80c9658

Update app.py

Files changed (1)
  1. app.py +45 -45
app.py CHANGED
@@ -39,54 +39,54 @@ from streamlit_pdf_viewer import pdf_viewer
 
 
 
-def consume_llm_api(prompt):
-    """
-    Sends a prompt to the LLM API and processes the streamed response.
-    """
-    url = "https://8417-201-238-124-65.ngrok-free.app/api/llm-response"
-    headers = {"Content-Type": "application/json"}
-    payload = {"prompt": prompt}
-
-    try:
-        print("Sending prompt to the LLM API...")
-        with requests.post(url, json=payload, headers=headers, stream=True) as response:
-            response.raise_for_status()
-            print("Response from LLM API:\n")
-            for line in response:
-                yield(line.decode('utf-8'))
-            # print(type(response))
-            # yield(response)
-    except requests.RequestException as e:
-        print(f"Error consuming API: {e}")
-    except Exception as e:
-        print(f"Unexpected error: {e}")
 # def consume_llm_api(prompt):
+#     """
+#     Sends a prompt to the LLM API and processes the streamed response.
+#     """
+#     url = "https://8417-201-238-124-65.ngrok-free.app/api/llm-response"
+#     headers = {"Content-Type": "application/json"}
+#     payload = {"prompt": prompt}
+
+#     try:
+#         print("Sending prompt to the LLM API...")
+#         with requests.post(url, json=payload, headers=headers, stream=True) as response:
+#             response.raise_for_status()
+#             print("Response from LLM API:\n")
+#             for line in response:
+#                 yield(line.decode('utf-8'))
+#             # print(type(response))
+#             # yield(response)
+#     except requests.RequestException as e:
+#         print(f"Error consuming API: {e}")
+#     except Exception as e:
+#         print(f"Unexpected error: {e}")
+def consume_llm_api(prompt):
 
-#     client = Groq(
-#         api_key="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"
-#     )
+    client = Groq(
+        api_key="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"
+    )
 
-#     completion = client.chat.completions.create(
+    completion = client.chat.completions.create(
 
-#         model="llama-3.3-70b-versatile",
-#         messages=[
-
-#             {
-#                 "role": "system",
-#                 "content": prompt
-#             },
-#         ],
-
-#         temperature=1,
-#         # max_completion_tokens=1024,
-#         top_p=1,
-#         stream=True,
-#         stop=None,
-#     )
-
-#     for chunk in completion:
-#         if chunk.choices[0].delta.content:
-#             yield chunk.choices[0].delta.content
+        model="llama-3.3-70b-versatile",
+        messages=[
+
+            {
+                "role": "system",
+                "content": prompt
+            },
+        ],
+
+        temperature=1,
+        # max_completion_tokens=1024,
+        top_p=1,
+        stream=True,
+        stop=None,
+    )
+
+    for chunk in completion:
+        if chunk.choices[0].delta.content:
+            yield chunk.choices[0].delta.content
 @st.cache_resource
 def encoding_model():
     """
@@ -351,7 +351,7 @@ with column2:
     for index,prompts_ in enumerate(dictionary['every_prompt_with_val'][::-1]):
         if prompts_[-1]=="@working":
            if index==0:
-
+               st.write(prompts_[0].split(send_prompt())[-1].upper() if send_prompt() in prompts_[0] else prompts_[0].upper())
               data_need=""
              while(len(data_need)==0):
                  if len(prompts_)==3:
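
Note: a minimal sketch of the same Groq-backed streaming generator introduced by this commit, but with the API key read from the environment instead of being hardcoded. The GROQ_API_KEY variable name and the st.write_stream usage are assumptions for illustration, not part of this diff.

import os

from groq import Groq


def consume_llm_api(prompt):
    """Stream the model's reply for `prompt` from Groq's chat completions API."""
    # Assumption: the key is supplied via the GROQ_API_KEY environment variable
    # rather than committed to the repository.
    client = Groq(api_key=os.environ["GROQ_API_KEY"])
    completion = client.chat.completions.create(
        model="llama-3.3-70b-versatile",
        messages=[{"role": "system", "content": prompt}],
        temperature=1,
        top_p=1,
        stream=True,
    )
    # Yield only non-empty delta chunks so the caller can render text as it arrives.
    for chunk in completion:
        if chunk.choices[0].delta.content:
            yield chunk.choices[0].delta.content


# Possible usage inside the Streamlit app (assumed, not shown in this diff):
# st.write_stream(consume_llm_api("Summarize the uploaded PDF."))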