warhawkmonk committed on
Commit 1766887 · verified · 1 Parent(s): e878e23

Update app.py

Files changed (1)
  1. app.py +44 -44
app.py CHANGED
@@ -39,54 +39,54 @@ from streamlit_pdf_viewer import pdf_viewer
 
 
 
-# def consume_llm_api(prompt):
-#     """
-#     Sends a prompt to the LLM API and processes the streamed response.
-#     """
-#     url = "https://8417-201-238-124-65.ngrok-free.app/api/llm-response"
-#     headers = {"Content-Type": "application/json"}
-#     payload = {"prompt": prompt}
-
-#     try:
-#         print("Sending prompt to the LLM API...")
-#         with requests.post(url, json=payload, headers=headers, stream=True) as response:
-#             response.raise_for_status()
-#             print("Response from LLM API:\n")
-#             for line in response:
-#                 yield(line.decode('utf-8'))
-#             # print(type(response))
-#             # yield(response)
-#     except requests.RequestException as e:
-#         print(f"Error consuming API: {e}")
-#     except Exception as e:
-#         print(f"Unexpected error: {e}")
 def consume_llm_api(prompt):
+    """
+    Sends a prompt to the LLM API and processes the streamed response.
+    """
+    url = "https://8417-201-238-124-65.ngrok-free.app/api/llm-response"
+    headers = {"Content-Type": "application/json"}
+    payload = {"prompt": prompt}
+
+    try:
+        print("Sending prompt to the LLM API...")
+        with requests.post(url, json=payload, headers=headers, stream=True) as response:
+            response.raise_for_status()
+            print("Response from LLM API:\n")
+            for line in response:
+                yield(line.decode('utf-8'))
+            # print(type(response))
+            # yield(response)
+    except requests.RequestException as e:
+        print(f"Error consuming API: {e}")
+    except Exception as e:
+        print(f"Unexpected error: {e}")
+# def consume_llm_api(prompt):
 
-    client = Groq(
-        api_key="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"
-    )
+#     client = Groq(
+#         api_key="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"
+#     )
 
-    completion = client.chat.completions.create(
+#     completion = client.chat.completions.create(
 
-        model="llama-3.3-70b-versatile",
-        messages=[
-
-            {
-                "role": "system",
-                "content": prompt
-            },
-        ],
-
-        temperature=1,
-        # max_completion_tokens=1024,
-        top_p=1,
-        stream=True,
-        stop=None,
-    )
-
-    for chunk in completion:
-        if chunk.choices[0].delta.content:
-            yield chunk.choices[0].delta.content
+#         model="llama-3.3-70b-versatile",
+#         messages=[
+
+#             {
+#                 "role": "system",
+#                 "content": prompt
+#             },
+#         ],
+
+#         temperature=1,
+#         # max_completion_tokens=1024,
+#         top_p=1,
+#         stream=True,
+#         stop=None,
+#     )
+
+#     for chunk in completion:
+#         if chunk.choices[0].delta.content:
+#             yield chunk.choices[0].delta.content
 @st.cache_resource
 def encoding_model():
     """