warhawkmonk committed
Commit 89a999b · verified · 1 Parent(s): 64b8da2

Update app.py

Files changed (1)
  1. app.py +46 -27
app.py CHANGED
@@ -29,36 +29,55 @@ import streamlit.components.v1 as components
  from datetime import datetime
  from streamlit_js_eval import streamlit_js_eval
  from streamlit_pdf_viewer import pdf_viewer
  def consume_llm_api(prompt):
-     """
-     Sends a prompt to the LLM API and processes the streamed response.
-     """
-     url = "https://8417-201-238-124-65.ngrok-free.app/api/llm-response"
-     headers = {"Content-Type": "application/json"}
-     payload = {"prompt": prompt}
-
-     try:
-         print("Sending prompt to the LLM API...")
-         with requests.post(url, json=payload, headers=headers, stream=True) as response:
-             response.raise_for_status()
-             print("Response from LLM API:\n")
-             for line in response:
-                 yield(line.decode('utf-8'))
-                 # print(type(response))
-                 # yield(response)
-     except requests.RequestException as e:
-         print(f"Error consuming API: {e}")
-     except Exception as e:
-         print(f"Unexpected error: {e}")
-
- # def consume_llm_api(prompt):
- #     llm_stream = llm_text_response()(prompt)
- #
- #     # Create a generator to stream the data
- #
- #     for chunk in llm_stream:
- #         yield chunk
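The removed implementation streamed the model's reply over plain HTTP from a temporary ngrok tunnel: requests.post(..., stream=True) opens the connection and the generator yields each chunk decoded as UTF-8. For reference, a minimal sketch of the same pattern; the endpoint URL, function name, and timeout below are illustrative assumptions, not part of the commit:

    import requests

    def stream_remote_llm(prompt, url="https://example.ngrok-free.app/api/llm-response"):
        """Yield decoded text chunks from a streaming HTTP endpoint (sketch)."""
        headers = {"Content-Type": "application/json"}
        payload = {"prompt": prompt}
        with requests.post(url, json=payload, headers=headers, stream=True, timeout=60) as response:
            response.raise_for_status()
            # iter_content(chunk_size=None) yields data as soon as it arrives;
            # iterating the response object directly, as the removed code did,
            # reads in fixed 128-byte chunks.
            for chunk in response.iter_content(chunk_size=None):
                if chunk:
                    yield chunk.decode("utf-8")

The commit replaces this self-hosted endpoint with a hosted Groq model while keeping the same generator interface; the added side of the hunk follows.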
 
  from datetime import datetime
  from streamlit_js_eval import streamlit_js_eval
  from streamlit_pdf_viewer import pdf_viewer
+ # def consume_llm_api(prompt):
+ #     """
+ #     Sends a prompt to the LLM API and processes the streamed response.
+ #     """
+ #     url = "https://8417-201-238-124-65.ngrok-free.app/api/llm-response"
+ #     headers = {"Content-Type": "application/json"}
+ #     payload = {"prompt": prompt}
+
+ #     try:
+ #         print("Sending prompt to the LLM API...")
+ #         with requests.post(url, json=payload, headers=headers, stream=True) as response:
+ #             response.raise_for_status()
+ #             print("Response from LLM API:\n")
+ #             for line in response:
+ #                 yield(line.decode('utf-8'))
+ #                 # print(type(response))
+ #                 # yield(response)
+ #     except requests.RequestException as e:
+ #         print(f"Error consuming API: {e}")
+ #     except Exception as e:
+ #         print(f"Unexpected error: {e}")
+
  def consume_llm_api(prompt):
+     client = Groq(
+         api_key="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"
+     )
+     completion = client.chat.completions.create(
+         model="llama-3.3-70b-versatile",
+         messages=[
+             {
+                 "role": "system",
+                 "content": prompt
+             },
+         ],
+         temperature=1,
+         # max_completion_tokens=1024,
+         top_p=1,
+         stream=True,
+         stop=None,
+     )
+
+     for chunk in completion:
+         if chunk.choices[0].delta.content:
+             yield chunk.choices[0].delta.content

  def send_prompt():
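The new implementation delegates generation to Groq's llama-3.3-70b-versatile model and keeps the streaming-generator contract, so callers like send_prompt() presumably need no change. Two details stand out: the API key is hardcoded in the committed source (it would normally be read from the environment, and a key committed this way should be treated as exposed), and the prompt is sent under the "system" role, whereas "user" is the conventional role for a prompt. A minimal sketch with those two points adjusted; the GROQ_API_KEY variable name is an assumption, not part of the commit:

    import os
    from groq import Groq

    def consume_llm_api(prompt):
        """Stream a Groq chat completion, yielding incremental text deltas (sketch)."""
        # Read the key from the environment rather than hardcoding it.
        client = Groq(api_key=os.environ["GROQ_API_KEY"])
        completion = client.chat.completions.create(
            model="llama-3.3-70b-versatile",
            messages=[{"role": "user", "content": prompt}],  # "user" rather than "system"
            temperature=1,
            top_p=1,
            stream=True,
        )
        for chunk in completion:
            # Each streamed chunk carries at most one incremental text delta.
            if chunk.choices[0].delta.content:
                yield chunk.choices[0].delta.content

Because the result is a plain generator, it can be rendered incrementally in the Streamlit UI with st.write_stream(consume_llm_api(prompt)).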