warhawkmonk committed
Commit 76229fa · verified · 1 parent: cf01abd

Update app.py

Files changed (1): app.py (+46 −74)
app.py CHANGED
@@ -39,54 +39,54 @@ from streamlit_pdf_viewer import pdf_viewer
 
 
 
-# def consume_llm_api(prompt):
-#     """
-#     Sends a prompt to the LLM API and processes the streamed response.
-#     """
-#     url = "https://8417-201-238-124-65.ngrok-free.app/api/llm-response"
-#     headers = {"Content-Type": "application/json"}
-#     payload = {"prompt": prompt}
-
-#     try:
-#         print("Sending prompt to the LLM API...")
-#         with requests.post(url, json=payload, headers=headers, stream=True) as response:
-#             response.raise_for_status()
-#             print("Response from LLM API:\n")
-#             for line in response:
-#                 yield(line.decode('utf-8'))
-#             # print(type(response))
-#             # yield(response)
-#     except requests.RequestException as e:
-#         print(f"Error consuming API: {e}")
-#     except Exception as e:
-#         print(f"Unexpected error: {e}")
 def consume_llm_api(prompt):
+    """
+    Sends a prompt to the LLM API and processes the streamed response.
+    """
+    url = "https://8417-201-238-124-65.ngrok-free.app/api/llm-response"
+    headers = {"Content-Type": "application/json"}
+    payload = {"prompt": prompt}
+
+    try:
+        print("Sending prompt to the LLM API...")
+        with requests.post(url, json=payload, headers=headers, stream=True) as response:
+            response.raise_for_status()
+            print("Response from LLM API:\n")
+            for line in response:
+                yield(line.decode('utf-8'))
+            # print(type(response))
+            # yield(response)
+    except requests.RequestException as e:
+        print(f"Error consuming API: {e}")
+    except Exception as e:
+        print(f"Unexpected error: {e}")
+# def consume_llm_api(prompt):
 
-    client = Groq(
-        api_key="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"
-    )
+#     client = Groq(
+#         api_key="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"
+#     )
 
-    completion = client.chat.completions.create(
-        model="llama-3.3-70b-versatile",
-        messages=[
-            {
-                "role": "system",
-                "content": prompt
-            },
-        ],
-        temperature=1,
-        # max_completion_tokens=1024,
-        top_p=1,
-        stream=True,
-        stop=None,
-    )
+#     completion = client.chat.completions.create(
+#         model="llama-3.3-70b-versatile",
+#         messages=[
+#             {
+#                 "role": "system",
+#                 "content": prompt
+#             },
+#         ],
+#         temperature=1,
+#         # max_completion_tokens=1024,
+#         top_p=1,
+#         stream=True,
+#         stop=None,
+#     )
 
-    for chunk in completion:
-        if chunk.choices[0].delta.content:
-            yield chunk.choices[0].delta.content
+#     for chunk in completion:
+#         if chunk.choices[0].delta.content:
+#             yield chunk.choices[0].delta.content
 @st.cache_resource
 def encoding_model():
     """
@@ -111,37 +111,9 @@ def dataframe_info(data):
     value= data[:5]
     return str(value)
 
-# def extract_python_code(text):
-#     """
-#     Extracts code blocks from a given text.
-#     Supports triple-backtick blocks, indented code, and inline code.
-#     """
-#     code_snippets = []
-
-#     # Extract triple-backtick code blocks
-#     triple_backtick_blocks = re.findall(r"```(?:[\w+\-]*)\n(.*?)```", text, re.DOTALL)
-#     code_snippets.extend(triple_backtick_blocks)
-
-#     # Extract indented code blocks (4 spaces or tab)
-#     indented_blocks = re.findall(r"(?:^|\n)((?:    |\t).+(\n(?:    |\t).+)*)", text)
-#     code_snippets.extend([block[0] for block in indented_blocks])
-
-#     # Extract inline code snippets with single backticks
-#     inline_code = re.findall(r"`([^`\n]+)`", text)
-#     code_snippets.extend(inline_code)
-
-#     return [snippet.strip() for snippet in code_snippets if snippet.strip()]
 def extract_python_code(text):
-    # Match triple backtick Python code blocks ```python ... ```
-    code_blocks = re.findall(r"```(?:python)?\n(.*?)```", text, re.DOTALL)
-
-    # If no triple-backtick code found, try to match indented blocks (e.g. 4 spaces)
-    if not code_blocks:
-        code_blocks = re.findall(r"(?:^|\n)(    .+?)(?=\n\S|\Z)", text, re.DOTALL)
-
-    # Clean indentation if needed
-    code_blocks = [re.sub(r"^\s{4}", "", block, flags=re.MULTILINE) for block in code_blocks]
-    return code_blocks
+    code_block=text.split("```")
+    return [code_block[1]]
 
 # @st.cache_resource
 def run_code_blocks(code_blocks,df):
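The simplified `extract_python_code` assumes the model reply contains exactly one fenced block: `text.split("```")[1]` raises `IndexError` when no fence is present, and it keeps the language tag (e.g. `python`) as the first line of the extracted code. A sketch with both cases guarded; the guards are additions, not part of the commit:

```python
def extract_python_code(text):
    """Sketch of the committed one-fence splitter with two guards:
    returns [] when no complete fenced block exists, and strips a
    leading language tag such as 'python' from the block."""
    parts = text.split("```")
    if len(parts) < 3:  # no opening/closing fence pair
        return []
    block = parts[1]
    first_line, _, rest = block.partition("\n")
    if first_line.strip().isalpha():  # drop a bare language hint
        block = rest
    return [block.strip()]

print(extract_python_code("Here:\n```python\ndf.head()\n```\nDone."))
# -> ['df.head()']
```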
 