muzammil-eds committed on
Commit
5c2df7b
·
1 Parent(s): 83bfcfd

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +25 -8
app.py CHANGED
@@ -5,16 +5,33 @@ import pandas as pd
5
  from langchain.agents import create_csv_agent, AgentType
6
  from langchain.chat_models import ChatOpenAI
7
  from htmlTemplates import css, user_template, bot_template
 
 
 
8
 
9
- # Set OpenAI API Key
10
- os.environ['OPENAI_API_KEY'] = os.getenv('OPENAI_API_KEY')
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
11
 
12
- # Initialize LangChain ChatOpenAI agent
13
- llm = ChatOpenAI(
14
- model='gpt-3.5-turbo',
15
- max_tokens=500,
16
- temperature=0.7,
17
- )
18
 
19
  def init_ses_states():
20
  st.session_state.setdefault('chat_history', [])
 
5
  from langchain.agents import create_csv_agent, AgentType
6
  from langchain.chat_models import ChatOpenAI
7
  from htmlTemplates import css, user_template, bot_template
8
+ from langchain.llms.base import LLM
9
+ from typing import Optional, List
10
+ import g4f
11
 
12
+ class FinLLM(LLM):
13
+
14
+ @property
15
+ def _llm_type(self) -> str:
16
+ return "custom"
17
+
18
+ def _call(self, prompt: str, stop: Optional[List[str]] = None) -> str:
19
+ out = g4f.ChatCompletion.create(
20
+ model="gpt-3.5-turbo",
21
+ messages=[{"role": "user", "content": prompt}],
22
+ temperature=0.5, # You can adjust parameters as needed
23
+ max_tokens=350 # Adjust the token limit as needed
24
+ ) #
25
+ if stop:
26
+ stop_indexes = (out.find(s) for s in stop if s in out)
27
+ min_stop = min(stop_indexes, default=-1)
28
+ if min_stop > -1:
29
+ out = out[:min_stop]
30
+ return out
31
+
32
+
33
+ llm = FinLLM()
34
 
 
 
 
 
 
 
35
 
36
  def init_ses_states():
37
  st.session_state.setdefault('chat_history', [])